/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM Ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or a set amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
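/*
 * For reference, each entry in the rings described above is a small
 * DMA-visible buffer descriptor. The sketch below is a simplified view of
 * the RX descriptor; the authoritative definitions are struct txbd8 and
 * struct rxbd8 in gianfar.h:
 *
 *	struct rxbd8 {
 *		union {
 *			struct {
 *				u16 status;	// RXBD_EMPTY, RXBD_WRAP, ...
 *				u16 length;	// length of received frame
 *			};
 *			u32 lstatus;		// combined length/status word
 *		};
 *		u32 bufPtr;			// physical address of skb data
 *	};
 */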

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

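/* Initialize a single RX buffer descriptor: point it at the given DMA
 * buffer and mark it empty (and as the wrap descriptor if it is the last
 * one in the ring). */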
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

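/* (Re)initialize all TX and RX buffer descriptor rings, allocating new
 * RX skbs for any descriptors that do not already have one. */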
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

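/* Allocate one DMA-coherent region holding all TX and RX descriptor rings,
 * plus the per-queue skbuff pointer arrays, then initialize the rings. */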
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

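/* Program the per-queue TX/RX descriptor ring base registers (tbase0/rbase0
 * and following) with the DMA addresses of the rings. */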
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
			ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_select_queue = gfar_select_queue,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

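/* Take/release the lock of every RX or TX queue in one go */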
void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_get_queue_mapping(skb);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

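/* Map the register region and interrupts of one interrupt group described
 * by the given device tree node, and record its RX/TX queue bit maps. */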
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;
	u64 addr, size;

	addr = of_translate_address(np,
			of_get_address(np, 0, &size, NULL));
	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);

	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

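/* Parse the device tree node: read the queue/group layout, allocate the
 * net_device and per-queue structures, and collect MAC address, stashing,
 * PHY and capability information into priv. */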
static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
				sizeof(struct gfar_priv_tx_q), GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
				sizeof(struct gfar_priv_rx_q), GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

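/* Reverse the order of the low max_qs bits of bit_map (MSB becomes LSB),
 * so that queue 0 ends up in bit 0 for use with for_each_bit(). */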
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

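/* Write a four-entry filer cluster for one traffic class (e.g. IPv4/TCP),
 * working downwards from rqfar; returns the updated (lowest used) index. */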
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

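/* Populate the hardware RX filer table: a default match-all rule, one
 * cluster per supported IPv4/IPv6 TCP/UDP class, and no-match rules for
 * the remaining entries. */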
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

/* Set up the Ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi. We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* enable filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)],
					"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

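/* Restore callback for hibernation: bring the device back up from scratch
 * (descriptor rings, registers, MAC) if it was running before. */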
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif

Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001288/* Reads the controller's registers to determine what interface
1289 * connects it to the PHY.
1290 */
1291static phy_interface_t gfar_get_interface(struct net_device *dev)
1292{
1293 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001294 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001295 u32 ecntrl;
1296
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001297 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001298
1299 if (ecntrl & ECNTRL_SGMII_MODE)
1300 return PHY_INTERFACE_MODE_SGMII;
1301
1302 if (ecntrl & ECNTRL_TBI_MODE) {
1303 if (ecntrl & ECNTRL_REDUCED_MODE)
1304 return PHY_INTERFACE_MODE_RTBI;
1305 else
1306 return PHY_INTERFACE_MODE_TBI;
1307 }
1308
1309 if (ecntrl & ECNTRL_REDUCED_MODE) {
1310 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1311 return PHY_INTERFACE_MODE_RMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001312 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001313 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001314
1315 /*
1316 * This isn't autodetected right now, so it must
1317 * be set by the device tree or platform code.
1318 */
1319 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1320 return PHY_INTERFACE_MODE_RGMII_ID;
1321
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001322 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001323 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001324 }
1325
Andy Flemingb31a1d82008-12-16 15:29:15 -08001326 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001327 return PHY_INTERFACE_MODE_GMII;
1328
1329 return PHY_INTERFACE_MODE_MII;
1330}
1331
1332
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001333/* Initializes driver's PHY state, and attaches to the PHY.
1334 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 */
1336static int init_phy(struct net_device *dev)
1337{
1338 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001339 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001340 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001341 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001342 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
1344 priv->oldlink = 0;
1345 priv->oldspeed = 0;
1346 priv->oldduplex = -1;
1347
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001348 interface = gfar_get_interface(dev);
1349
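	/*
	 * Attach to the PHY described by the device tree; if no PHY node
	 * was given, fall back to a fixed-link description so the MAC can
	 * still be brought up at a fixed speed and duplex.
	 */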
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001350 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1351 interface);
1352 if (!priv->phydev)
1353 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1354 interface);
1355 if (!priv->phydev) {
1356 dev_err(&dev->dev, "could not attach to PHY\n");
1357 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001358 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
Kapil Junejad3c12872007-05-11 18:25:11 -05001360 if (interface == PHY_INTERFACE_MODE_SGMII)
1361 gfar_configure_serdes(dev);
1362
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001363 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001364 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1365 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
1367 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368}
1369
Paul Gortmakerd0313582008-04-17 00:08:10 -04001370/*
1371 * Initialize TBI PHY interface for communicating with the
1372 * SERDES lynx PHY on the chip. We communicate with this PHY
1373 * through the MDIO bus on each controller, treating it as a
1374 * "normal" PHY at the address found in the TBIPA register. We assume
1375 * that the TBIPA register is valid. Either the MDIO bus code will set
1376 * it to a value that doesn't conflict with other PHYs on the bus, or the
1377 * value doesn't matter, as there are no other PHYs on the bus.
1378 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001379static void gfar_configure_serdes(struct net_device *dev)
1380{
1381 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001382 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001383
Grant Likelyfe192a42009-04-25 12:53:12 +00001384 if (!priv->tbi_node) {
1385 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1386 "device tree specify a tbi-handle\n");
1387 return;
1388 }
1389
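	/*
	 * The TBI PHY is described in the device tree as a child of the
	 * controller's MDIO node and referenced via tbi-handle.  An
	 * illustrative fragment (names and addresses are examples only)
	 * looks roughly like:
	 *
	 *	enet0: ethernet@24000 {
	 *		tbi-handle = <&tbi0>;
	 *		...
	 *	};
	 *	mdio@24520 {
	 *		tbi0: tbi-phy@11 {
	 *			reg = <0x11>;
	 *			device_type = "tbi-phy";
	 *		};
	 *	};
	 */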
1390 tbiphy = of_phy_find_device(priv->tbi_node);
1391 if (!tbiphy) {
1392 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001393 return;
1394 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001395
Andy Flemingb31a1d82008-12-16 15:29:15 -08001396 /*
1397 * If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001398 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1399 * everything for us? Resetting it takes the link down and requires
1400 * several seconds for it to come back.
1401 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001402 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001403 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001404
Paul Gortmakerd0313582008-04-17 00:08:10 -04001405 /* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001406 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001407
Grant Likelyfe192a42009-04-25 12:53:12 +00001408 phy_write(tbiphy, MII_ADVERTISE,
Kapil Junejad3c12872007-05-11 18:25:11 -05001409 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1410 ADVERTISE_1000XPSE_ASYM);
1411
Grant Likelyfe192a42009-04-25 12:53:12 +00001412 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
Kapil Junejad3c12872007-05-11 18:25:11 -05001413 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1414}
1415
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416static void init_registers(struct net_device *dev)
1417{
1418 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001419 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001420 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001422 for (i = 0; i < priv->num_grps; i++) {
1423 regs = priv->gfargrp[i].regs;
1424 /* Clear IEVENT */
1425 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001427 /* Initialize IMASK */
1428 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1429 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001431 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001433 gfar_write(&regs->igaddr0, 0);
1434 gfar_write(&regs->igaddr1, 0);
1435 gfar_write(&regs->igaddr2, 0);
1436 gfar_write(&regs->igaddr3, 0);
1437 gfar_write(&regs->igaddr4, 0);
1438 gfar_write(&regs->igaddr5, 0);
1439 gfar_write(&regs->igaddr6, 0);
1440 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001442 gfar_write(&regs->gaddr0, 0);
1443 gfar_write(&regs->gaddr1, 0);
1444 gfar_write(&regs->gaddr2, 0);
1445 gfar_write(&regs->gaddr3, 0);
1446 gfar_write(&regs->gaddr4, 0);
1447 gfar_write(&regs->gaddr5, 0);
1448 gfar_write(&regs->gaddr6, 0);
1449 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001452 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001453 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001456 gfar_write(&regs->rmon.cam1, 0xffffffff);
1457 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 }
1459
1460 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001461 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001464 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465}
1466
Kumar Gala0bbaf062005-06-20 10:54:21 -05001467
1468/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001469static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470{
1471 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001472 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001474 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001476 for (i = 0; i < priv->num_grps; i++) {
1477 regs = priv->gfargrp[i].regs;
1478 /* Mask all interrupts */
1479 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001481 /* Clear all interrupts */
1482 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1483 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001485 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001487 tempval = gfar_read(&regs->dmactrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1489 != (DMACTRL_GRS | DMACTRL_GTS)) {
1490 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001491 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001493 while (!(gfar_read(&regs->ievent) &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 (IEVENT_GRSC | IEVENT_GTSC)))
1495 cpu_relax();
1496 }
Scott Woodd87eb122008-07-11 18:04:45 -05001497}
Scott Woodd87eb122008-07-11 18:04:45 -05001498
1499/* Halt the receive and transmit queues */
1500void gfar_halt(struct net_device *dev)
1501{
1502 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001503 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001504 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
Scott Wood2a54adc2008-08-12 15:10:46 -05001506 gfar_halt_nodisable(dev);
1507
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 /* Disable Rx and Tx */
1509 tempval = gfar_read(&regs->maccfg1);
1510 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1511 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001512}
1513
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001514static void free_grp_irqs(struct gfar_priv_grp *grp)
1515{
1516 free_irq(grp->interruptError, grp);
1517 free_irq(grp->interruptTransmit, grp);
1518 free_irq(grp->interruptReceive, grp);
1519}
1520
Kumar Gala0bbaf062005-06-20 10:54:21 -05001521void stop_gfar(struct net_device *dev)
1522{
1523 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001524 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001525 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001526
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001527 phy_stop(priv->phydev);
1528
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001529
Kumar Gala0bbaf062005-06-20 10:54:21 -05001530 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001531 local_irq_save(flags);
1532 lock_tx_qs(priv);
1533 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001534
Kumar Gala0bbaf062005-06-20 10:54:21 -05001535 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001537 unlock_rx_qs(priv);
1538 unlock_tx_qs(priv);
1539 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001542 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001543 for (i = 0; i < priv->num_grps; i++)
1544 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001546 for (i = 0; i < priv->num_grps; i++)
1547 free_irq(priv->gfargrp[i].interruptTransmit,
1548 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 }
1550
1551 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552}
1553
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001554static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001557 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001558 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001560 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001562 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1563 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001564 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565
Kumar Gala48268572009-03-18 23:28:22 -07001566 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001567 txbdp->length, DMA_TO_DEVICE);
1568 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001569 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1570 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001571 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001572 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001573 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001575 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001576 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1577 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001579 kfree(tx_queue->tx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001580}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001582static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1583{
1584 struct rxbd8 *rxbdp;
1585 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1586 int i;
1587
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001588 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001590 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1591 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001592 dma_unmap_single(&priv->ofdev->dev,
1593 rxbdp->bufPtr, priv->rx_buffer_size,
Anton Vorontsove69edd22009-10-12 06:00:30 +00001594 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001595 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1596 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001598 rxbdp->lstatus = 0;
1599 rxbdp->bufPtr = 0;
1600 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001602 kfree(rx_queue->rx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001603}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001604
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001605/* If there are any tx skbs or rx skbs still around, free them.
1606 * Then free tx_skbuff and rx_skbuff */
1607static void free_skb_resources(struct gfar_private *priv)
1608{
1609 struct gfar_priv_tx_q *tx_queue = NULL;
1610 struct gfar_priv_rx_q *rx_queue = NULL;
1611 int i;
1612
1613 /* Go through all the buffer descriptors and free their data buffers */
1614 for (i = 0; i < priv->num_tx_queues; i++) {
1615 tx_queue = priv->tx_queue[i];
1616 if (tx_queue->tx_skbuff)
1617 free_skb_tx_queue(tx_queue);
1618 }
1619
1620 for (i = 0; i < priv->num_rx_queues; i++) {
1621 rx_queue = priv->rx_queue[i];
1622 if (rx_queue->rx_skbuff)
1623 free_skb_rx_queue(rx_queue);
1624 }
1625
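	/*
	 * All tx and rx BD rings live in one coherent allocation anchored
	 * at the first tx queue, so a single free releases every ring.
	 */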
1626 dma_free_coherent(&priv->ofdev->dev,
1627 sizeof(struct txbd8) * priv->total_tx_ring_size +
1628 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1629 priv->tx_queue[0]->tx_bd_base,
1630 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631}
1632
Kumar Gala0bbaf062005-06-20 10:54:21 -05001633void gfar_start(struct net_device *dev)
1634{
1635 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001636 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001637 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001638 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001639
1640 /* Enable Rx and Tx in MACCFG1 */
1641 tempval = gfar_read(&regs->maccfg1);
1642 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1643 gfar_write(&regs->maccfg1, tempval);
1644
1645 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001646 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001647 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001648 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001649
Kumar Gala0bbaf062005-06-20 10:54:21 -05001650 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001651 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001652 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001653 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001654
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001655 for (i = 0; i < priv->num_grps; i++) {
1656 regs = priv->gfargrp[i].regs;
1657 /* Clear THLT/RHLT, so that the DMA starts polling now */
1658 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1659 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1660 /* Unmask the interrupts we look for */
1661 gfar_write(&regs->imask, IMASK_DEFAULT);
1662 }
Dai Haruki12dea572008-12-16 15:30:20 -08001663
1664 dev->trans_start = jiffies;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001665}
1666
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001667void gfar_configure_coalescing(struct gfar_private *priv,
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001668 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001670 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001671 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001672 int i = 0;
1673
1674 /* Backward compatible case -- even if we enable
1675 * multiple queues, there's only a single register to program
1676 */
1677 gfar_write(&regs->txic, 0);
1678 if (likely(priv->tx_queue[0]->txcoalescing))
1679 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1680
1681 gfar_write(&regs->rxic, 0);
1682 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1683 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1684
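	/*
	 * In multi-group mode each queue has its own coalescing register,
	 * laid out contiguously from txic0/rxic0; only the queues named in
	 * tx_mask/rx_mask are programmed.
	 */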
1685 if (priv->mode == MQ_MG_MODE) {
1686 baddr = &regs->txic0;
1687 for_each_bit(i, &tx_mask, priv->num_tx_queues) {
1688 if (likely(priv->tx_queue[i]->txcoalescing)) {
1689 gfar_write(baddr + i, 0);
1690 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1691 }
1692 }
1693
1694 baddr = &regs->rxic0;
1695 for_each_bit(i, &rx_mask, priv->num_rx_queues) {
1696 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1697 gfar_write(baddr + i, 0);
1698 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1699 }
1700 }
1701 }
1702}
1703
1704static int register_grp_irqs(struct gfar_priv_grp *grp)
1705{
1706 struct gfar_private *priv = grp->priv;
1707 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001708 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 /* If the device has multiple interrupts, register for
1711 * them. Otherwise, only register for the one */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001712 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001713 /* Install our interrupt handlers for Error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 * Transmit, and Receive */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001715 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1716 grp->int_name_er,grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001717 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001718 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1719 dev->name, grp->interruptError);
1720
1721 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 }
1723
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001724 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1725 0, grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001726 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001727 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1728 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 goto tx_irq_fail;
1730 }
1731
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001732 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1733 grp->int_name_rx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001734 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001735 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1736 dev->name, grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 goto rx_irq_fail;
1738 }
1739 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001740 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1741 grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001742 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001743 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1744 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 goto err_irq_fail;
1746 }
1747 }
1748
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001749 return 0;
1750
1751rx_irq_fail:
1752 free_irq(grp->interruptTransmit, grp);
1753tx_irq_fail:
1754 free_irq(grp->interruptError, grp);
1755err_irq_fail:
1756 return err;
1757
1758}
1759
1760/* Bring the controller up and running */
1761int startup_gfar(struct net_device *ndev)
1762{
1763 struct gfar_private *priv = netdev_priv(ndev);
1764 struct gfar __iomem *regs = NULL;
1765 int err, i, j;
1766
1767 for (i = 0; i < priv->num_grps; i++) {
1768 regs = priv->gfargrp[i].regs;
1769 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1770 }
1771
1772 regs = priv->gfargrp[0].regs;
1773 err = gfar_alloc_skb_resources(ndev);
1774 if (err)
1775 return err;
1776
1777 gfar_init_mac(ndev);
1778
1779 for (i = 0; i < priv->num_grps; i++) {
1780 err = register_grp_irqs(&priv->gfargrp[i]);
1781 if (err) {
1782 for (j = 0; j < i; j++)
1783 free_grp_irqs(&priv->gfargrp[j]);
1784 goto irq_fail;
1785 }
1786 }
1787
Andy Fleming7f7f5312005-11-11 12:38:59 -06001788 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001789 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001791 phy_start(priv->phydev);
1792
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001793 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 return 0;
1796
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001797irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001798 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return err;
1800}
1801
1802/* Called when something needs to use the ethernet device */
1803/* Returns 0 for success. */
1804static int gfar_enet_open(struct net_device *dev)
1805{
Li Yang94e8cc32007-10-12 21:53:51 +08001806 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 int err;
1808
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001809 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001810
Andy Fleming0fd56bb2009-02-04 16:43:16 -08001811 skb_queue_head_init(&priv->rx_recycle);
1812
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 /* Initialize a bunch of registers */
1814 init_registers(dev);
1815
1816 gfar_set_mac_address(dev);
1817
1818 err = init_phy(dev);
1819
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001820 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001821 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001823 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
1825 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001826 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001827 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001828 return err;
1829 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001831 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001833 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1834
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 return err;
1836}
1837
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001838static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001839{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001840 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001841
1842 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001843
Kumar Gala0bbaf062005-06-20 10:54:21 -05001844 return fcb;
1845}
1846
1847static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1848{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001849 u8 flags = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001850
1851 /* If we're here, it's an IP packet with a TCP or UDP
1852 * payload. We set it to checksum, using a pseudo-header
1853 * we provide
1854 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001855 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001856
Andy Fleming7f7f5312005-11-11 12:38:59 -06001857 /* Tell the controller what the protocol is */
1858 /* And provide the already calculated phcs */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001859 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001860 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03001861 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06001862 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05001863 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001864
1865 /* l3os is the distance between the start of the
1866 * frame (skb->data) and the start of the IP hdr.
1867 * l4os is the distance between the start of the
1868 * l3 hdr and the l4 hdr */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001869 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03001870 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001871
Andy Fleming7f7f5312005-11-11 12:38:59 -06001872 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001873}
1874
Andy Fleming7f7f5312005-11-11 12:38:59 -06001875static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001876{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001877 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001878 fcb->vlctl = vlan_tx_tag_get(skb);
1879}
1880
Dai Haruki4669bc92008-12-17 16:51:04 -08001881static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1882 struct txbd8 *base, int ring_size)
1883{
1884 struct txbd8 *new_bd = bdp + stride;
1885
1886 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1887}
1888
1889static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1890 int ring_size)
1891{
1892 return skip_txbd(bdp, 1, base, ring_size);
1893}
1894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895/* This is called by the kernel when a frame is ready for transmission. */
1896/* It is pointed to by the dev->hard_start_xmit function pointer */
1897static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1898{
1899 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001900 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001901 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001902 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001903 struct txfcb *fcb = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08001904 struct txbd8 *txbdp, *txbdp_start, *base;
Dai Haruki5a5efed2008-12-16 15:34:50 -08001905 u32 lstatus;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001906 int i, rq = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08001907 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05001908 unsigned long flags;
Dai Haruki4669bc92008-12-17 16:51:04 -08001909 unsigned int nr_frags, length;
1910
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001911
1912 rq = skb->queue_mapping;
1913 tx_queue = priv->tx_queue[rq];
1914 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001915 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001916 regs = tx_queue->grp->regs;
Dai Haruki4669bc92008-12-17 16:51:04 -08001917
Li Yang5b28bea2009-03-27 15:54:30 -07001918 /* make space for additional header when fcb is needed */
1919 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1920 (priv->vlgrp && vlan_tx_tag_present(skb))) &&
1921 (skb_headroom(skb) < GMAC_FCB_LEN)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001922 struct sk_buff *skb_new;
1923
1924 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
1925 if (!skb_new) {
1926 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07001927 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001928 return NETDEV_TX_OK;
1929 }
1930 kfree_skb(skb);
1931 skb = skb_new;
1932 }
1933
Dai Haruki4669bc92008-12-17 16:51:04 -08001934 /* total number of fragments in the SKB */
1935 nr_frags = skb_shinfo(skb)->nr_frags;
1936
Dai Haruki4669bc92008-12-17 16:51:04 -08001937 /* check if there is space to queue this packet */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001938 if ((nr_frags+1) > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001939 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001940 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08001941 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08001942 return NETDEV_TX_BUSY;
1943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
1945 /* Update transmit stats */
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001946 dev->stats.tx_bytes += skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001948 txbdp = txbdp_start = tx_queue->cur_tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
Dai Haruki4669bc92008-12-17 16:51:04 -08001950 if (nr_frags == 0) {
1951 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1952 } else {
1953 /* Place the fragment addresses and lengths into the TxBDs */
1954 for (i = 0; i < nr_frags; i++) {
1955 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001956 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
Dai Haruki4669bc92008-12-17 16:51:04 -08001958 length = skb_shinfo(skb)->frags[i].size;
1959
1960 lstatus = txbdp->lstatus | length |
1961 BD_LFLAG(TXBD_READY);
1962
1963 /* Handle the last BD specially */
1964 if (i == nr_frags - 1)
1965 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1966
Kumar Gala48268572009-03-18 23:28:22 -07001967 bufaddr = dma_map_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08001968 skb_shinfo(skb)->frags[i].page,
1969 skb_shinfo(skb)->frags[i].page_offset,
1970 length,
1971 DMA_TO_DEVICE);
1972
1973 /* set the TxBD length and buffer pointer */
1974 txbdp->bufPtr = bufaddr;
1975 txbdp->lstatus = lstatus;
1976 }
1977
1978 lstatus = txbdp_start->lstatus;
1979 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
Kumar Gala0bbaf062005-06-20 10:54:21 -05001981 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08001982 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001983 fcb = gfar_add_fcb(skb);
1984 lstatus |= BD_LFLAG(TXBD_TOE);
1985 gfar_tx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001986 }
1987
Dai Haruki77ecaf22008-12-16 15:30:48 -08001988 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001989 if (unlikely(NULL == fcb)) {
1990 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08001991 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06001992 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001993
1994 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001995 }
1996
Dai Haruki4669bc92008-12-17 16:51:04 -08001997 /* setup the TxBD length and buffer pointer for the first BD */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001998 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
Kumar Gala48268572009-03-18 23:28:22 -07001999 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Dai Haruki4669bc92008-12-17 16:51:04 -08002000 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Dai Haruki4669bc92008-12-17 16:51:04 -08002002 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
Dai Haruki4669bc92008-12-17 16:51:04 -08002004 /*
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002005 * We can work in parallel with gfar_clean_tx_ring(), except
2006 * when modifying num_txbdfree. Note that we didn't grab the lock
2007 * when we were reading the num_txbdfree and checking for available
2008 * space, that's because outside of this function it can only grow,
2009 * and once we've got needed space, it cannot suddenly disappear.
2010 *
2011 * The lock also protects us from gfar_error(), which can modify
2012 * regs->tstat and thus retrigger the transfers, which is why we
2013 * also must grab the lock before setting ready bit for the first
2014 * to be transmitted BD.
2015 */
2016 spin_lock_irqsave(&tx_queue->txlock, flags);
2017
2018 /*
Dai Haruki4669bc92008-12-17 16:51:04 -08002019 * The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002020 * semantics (it requires synchronization between cacheable and
2021 * uncacheable mappings, which eieio doesn't provide and which we
2022 * don't need), thus requiring a more expensive sync instruction. At
2023 * some point, the set of architecture-independent barrier functions
2024 * should be expanded to include weaker barriers.
2025 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002026 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002027
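	/* Setting READY in the first BD is what hands the whole frame,
	 * including any fragment BDs set up above, to the controller. */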
Dai Haruki4669bc92008-12-17 16:51:04 -08002028 txbdp_start->lstatus = lstatus;
2029
2030 /* Update the current skb pointer to the next entry we will use
2031 * (wrapping if necessary) */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002032 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2033 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002034
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002035 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002036
2037 /* reduce TxBD free count */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002038 tx_queue->num_txbdfree -= (nr_frags + 1);
Dai Haruki4669bc92008-12-17 16:51:04 -08002039
2040 dev->trans_start = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042 /* If the next BD still needs to be cleaned up, then the BDs
2043 * are full. We need to tell the kernel to stop sending us stuff. */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002044 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002045 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002047 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
2049
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002051 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
2053 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002054 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002056 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057}
2058
2059/* Stops the kernel queue, and halts the controller */
2060static int gfar_close(struct net_device *dev)
2061{
2062 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002063
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002064 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002065
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002066 skb_queue_purge(&priv->rx_recycle);
Sebastian Siewiorab939902008-08-19 21:12:45 +02002067 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 stop_gfar(dev);
2069
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002070 /* Disconnect from the PHY */
2071 phy_disconnect(priv->phydev);
2072 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002074 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
2076 return 0;
2077}
2078
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002080static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002082 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
2084 return 0;
2085}
2086
2087
Kumar Gala0bbaf062005-06-20 10:54:21 -05002088/* Enables and disables VLAN insertion/extraction */
2089static void gfar_vlan_rx_register(struct net_device *dev,
2090 struct vlan_group *grp)
2091{
2092 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002093 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002094 unsigned long flags;
2095 u32 tempval;
2096
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002097 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002098 local_irq_save(flags);
2099 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002100
Anton Vorontsovcd1f55a2009-01-26 14:33:23 -08002101 priv->vlgrp = grp;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002102
2103 if (grp) {
2104 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002105 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002106 tempval |= TCTRL_VLINS;
2107
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002108 gfar_write(&regs->tctrl, tempval);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002109
Kumar Gala0bbaf062005-06-20 10:54:21 -05002110 /* Enable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002111 tempval = gfar_read(&regs->rctrl);
Dai Haruki77ecaf22008-12-16 15:30:48 -08002112 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002113 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002114 } else {
2115 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002116 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002117 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002118 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002119
2120 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002121 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002122 tempval &= ~RCTRL_VLEX;
Dai Haruki77ecaf22008-12-16 15:30:48 -08002123 /* If parse is no longer required, then disable parser */
2124 if (tempval & RCTRL_REQ_PARSER)
2125 tempval |= RCTRL_PRSDEP_INIT;
2126 else
2127 tempval &= ~RCTRL_PRSDEP_INIT;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002128 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002129 }
2130
Dai Haruki77ecaf22008-12-16 15:30:48 -08002131 gfar_change_mtu(dev, dev->mtu);
2132
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002133 unlock_rx_qs(priv);
2134 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002135}
2136
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2138{
2139 int tempsize, tempval;
2140 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002141 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002143 int frame_size = new_mtu + ETH_HLEN;
2144
Dai Haruki77ecaf22008-12-16 15:30:48 -08002145 if (priv->vlgrp)
Dai Harukifaa89572008-03-24 10:53:26 -05002146 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002149 if (netif_msg_drv(priv))
2150 printk(KERN_ERR "%s: Invalid MTU setting\n",
2151 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 return -EINVAL;
2153 }
2154
Dai Haruki77ecaf22008-12-16 15:30:48 -08002155 if (gfar_uses_fcb(priv))
2156 frame_size += GMAC_FCB_LEN;
2157
2158 frame_size += priv->padding;
2159
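	/* Round the required buffer size up to the next
	 * INCREMENTAL_BUFFER_SIZE boundary. */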
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 tempsize =
2161 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2162 INCREMENTAL_BUFFER_SIZE;
2163
2164 /* Only stop and start the controller if it isn't already
Andy Fleming7f7f5312005-11-11 12:38:59 -06002165 * stopped, and we changed something */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2167 stop_gfar(dev);
2168
2169 priv->rx_buffer_size = tempsize;
2170
2171 dev->mtu = new_mtu;
2172
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002173 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2174 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
2176 /* If the mtu is larger than the max size for standard
2177 * ethernet frames (ie, a jumbo frame), then set maccfg2
2178 * to allow huge frames, and to check the length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002179 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
2181 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
2182 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2183 else
2184 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2185
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002186 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
2188 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2189 startup_gfar(dev);
2190
2191 return 0;
2192}
2193
Sebastian Siewiorab939902008-08-19 21:12:45 +02002194/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 * transmitted after a set amount of time.
2196 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002197 * starting over will fix the problem.
2198 */
2199static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002201 struct gfar_private *priv = container_of(work, struct gfar_private,
2202 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002203 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
2205 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002206 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 stop_gfar(dev);
2208 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002209 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 }
2211
David S. Miller263ba322008-07-15 03:47:41 -07002212 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
2214
Sebastian Siewiorab939902008-08-19 21:12:45 +02002215static void gfar_timeout(struct net_device *dev)
2216{
2217 struct gfar_private *priv = netdev_priv(dev);
2218
2219 dev->stats.tx_errors++;
2220 schedule_work(&priv->reset_task);
2221}
2222
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223/* Interrupt Handler for Transmit complete */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002224static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002226 struct net_device *dev = tx_queue->dev;
Dai Harukid080cd62008-04-09 19:37:51 -05002227 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002228 struct gfar_priv_rx_q *rx_queue = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002229 struct txbd8 *bdp;
2230 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002231 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002232 struct sk_buff *skb;
2233 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002234 int tx_ring_size = tx_queue->tx_ring_size;
Dai Haruki4669bc92008-12-17 16:51:04 -08002235 int frags = 0;
2236 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002237 int howmany = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002238 u32 lstatus;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002240 rx_queue = priv->rx_queue[tx_queue->qindex];
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002241 bdp = tx_queue->dirty_tx;
2242 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002243
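	/*
	 * Walk the ring from dirty_tx, reclaiming every frame the
	 * controller has finished with; stop at the first descriptor
	 * that is still owned by the hardware.
	 */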
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002244 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002245 unsigned long flags;
2246
Dai Haruki4669bc92008-12-17 16:51:04 -08002247 frags = skb_shinfo(skb)->nr_frags;
2248 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
2249
2250 lstatus = lbdp->lstatus;
2251
2252 /* Only clean completed frames */
2253 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2254 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 break;
2256
Kumar Gala48268572009-03-18 23:28:22 -07002257 dma_unmap_single(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002258 bdp->bufPtr,
2259 bdp->length,
2260 DMA_TO_DEVICE);
2261
2262 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2263 bdp = next_txbd(bdp, base, tx_ring_size);
2264
2265 for (i = 0; i < frags; i++) {
Kumar Gala48268572009-03-18 23:28:22 -07002266 dma_unmap_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002267 bdp->bufPtr,
2268 bdp->length,
2269 DMA_TO_DEVICE);
2270 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2271 bdp = next_txbd(bdp, base, tx_ring_size);
2272 }
2273
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002274 /*
2275 * If there's room in the queue (limit it to rx_buffer_size)
2276 * we add this skb back into the pool, if it's the right size
2277 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002278 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002279 skb_recycle_check(skb, priv->rx_buffer_size +
2280 RXBUF_ALIGNMENT))
2281 __skb_queue_head(&priv->rx_recycle, skb);
2282 else
2283 dev_kfree_skb_any(skb);
2284
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002285 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002286
2287 skb_dirtytx = (skb_dirtytx + 1) &
2288 TX_RING_MOD_MASK(tx_ring_size);
2289
Dai Harukid080cd62008-04-09 19:37:51 -05002290 howmany++;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002291 spin_lock_irqsave(&tx_queue->txlock, flags);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002292 tx_queue->num_txbdfree += frags + 1;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002293 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Dai Haruki4669bc92008-12-17 16:51:04 -08002294 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
Dai Haruki4669bc92008-12-17 16:51:04 -08002296 /* If we freed a buffer, we can restart transmission, if necessary */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002297 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2298 netif_wake_subqueue(dev, tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
Dai Haruki4669bc92008-12-17 16:51:04 -08002300 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002301 tx_queue->skb_dirtytx = skb_dirtytx;
2302 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Dai Harukid080cd62008-04-09 19:37:51 -05002304 dev->stats.tx_packets += howmany;
2305
2306 return howmany;
2307}
2308
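/*
 * Mask this group's RTX interrupts and schedule its NAPI context; the
 * interrupts are re-enabled once the poll routine has cleaned the rings.
 */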
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002309static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
Dai Haruki8c7396a2008-12-17 16:52:00 -08002310{
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002311 unsigned long flags;
2312
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002313 spin_lock_irqsave(&gfargrp->grplock, flags);
2314 if (napi_schedule_prep(&gfargrp->napi)) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002315 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002316 __napi_schedule(&gfargrp->napi);
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002317 } else {
2318 /*
2319 * Clear IEVENT, so interrupts aren't called again
2320 * because of the packets that have already arrived.
2321 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002322 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002323 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002324 spin_unlock_irqrestore(&gfargrp->grplock, flags);
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002325
Dai Haruki8c7396a2008-12-17 16:52:00 -08002326}
2327
Dai Harukid080cd62008-04-09 19:37:51 -05002328/* Interrupt Handler for Transmit complete */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002329static irqreturn_t gfar_transmit(int irq, void *grp_id)
Dai Harukid080cd62008-04-09 19:37:51 -05002330{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002331 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 return IRQ_HANDLED;
2333}
2334
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002335static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Andy Fleming815b97c2008-04-22 17:18:29 -05002336 struct sk_buff *skb)
2337{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002338 struct net_device *dev = rx_queue->dev;
Andy Fleming815b97c2008-04-22 17:18:29 -05002339 struct gfar_private *priv = netdev_priv(dev);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002340 dma_addr_t buf;
Andy Fleming815b97c2008-04-22 17:18:29 -05002341
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002342 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2343 priv->rx_buffer_size, DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002344 gfar_init_rxbdp(rx_queue, bdp, buf);
Andy Fleming815b97c2008-04-22 17:18:29 -05002345}
2346
2347
2348struct sk_buff * gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002350 unsigned int alignamount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 struct gfar_private *priv = netdev_priv(dev);
2352 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
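	/* Prefer a recycled skb; fall back to a fresh allocation. */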
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002354 skb = __skb_dequeue(&priv->rx_recycle);
2355 if (!skb)
2356 skb = netdev_alloc_skb(dev,
2357 priv->rx_buffer_size + RXBUF_ALIGNMENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358
Andy Fleming815b97c2008-04-22 17:18:29 -05002359 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 return NULL;
2361
Andy Fleming7f7f5312005-11-11 12:38:59 -06002362 alignamount = RXBUF_ALIGNMENT -
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002363 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
Andy Fleming7f7f5312005-11-11 12:38:59 -06002364
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 /* We need the data buffer to be aligned properly. We will reserve
2366 * as many bytes as needed to align the data properly
2367 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002368 skb_reserve(skb, alignamount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 return skb;
2371}
2372
Li Yang298e1a92007-10-16 14:18:13 +08002373static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374{
Li Yang298e1a92007-10-16 14:18:13 +08002375 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002376 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 struct gfar_extra_stats *estats = &priv->extra_stats;
2378
2379 /* If the packet was truncated, none of the other errors
2380 * matter */
2381 if (status & RXBD_TRUNCATED) {
2382 stats->rx_length_errors++;
2383
2384 estats->rx_trunc++;
2385
2386 return;
2387 }
2388 /* Count the errors, if there were any */
2389 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2390 stats->rx_length_errors++;
2391
2392 if (status & RXBD_LARGE)
2393 estats->rx_large++;
2394 else
2395 estats->rx_short++;
2396 }
2397 if (status & RXBD_NONOCTET) {
2398 stats->rx_frame_errors++;
2399 estats->rx_nonoctet++;
2400 }
2401 if (status & RXBD_CRCERR) {
2402 estats->rx_crcerr++;
2403 stats->rx_crc_errors++;
2404 }
2405 if (status & RXBD_OVERRUN) {
2406 estats->rx_overrun++;
2407 stats->rx_crc_errors++;
2408 }
2409}
2410
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002411irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002413 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 return IRQ_HANDLED;
2415}
2416
Kumar Gala0bbaf062005-06-20 10:54:21 -05002417static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2418{
2419 /* If valid headers were found, and valid sums
2420 * were verified, then we tell the kernel that no
2421 * checksumming is necessary. Otherwise, it is */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002422 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05002423 skb->ip_summed = CHECKSUM_UNNECESSARY;
2424 else
2425 skb->ip_summed = CHECKSUM_NONE;
2426}
2427
2428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429/* gfar_process_frame() -- handle one incoming packet if skb
2430 * isn't NULL. */
2431static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
Dai Haruki2c2db482008-12-16 15:31:15 -08002432 int amount_pull)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433{
2434 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002435 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436
Dai Haruki2c2db482008-12-16 15:31:15 -08002437 int ret;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002438
Dai Haruki2c2db482008-12-16 15:31:15 -08002439 /* fcb is at the beginning if exists */
2440 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
Dai Haruki2c2db482008-12-16 15:31:15 -08002442 /* Record the Rx queue this frame arrived on, taken from the FCB */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002443 skb_set_queue_mapping(skb, fcb->rq);
Dai Haruki2c2db482008-12-16 15:31:15 -08002444 /* Remove the FCB and any padded bytes, if there are any */
2445 if (amount_pull)
2446 skb_pull(skb, amount_pull);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002447
Dai Haruki2c2db482008-12-16 15:31:15 -08002448 if (priv->rx_csum_enable)
2449 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002450
Dai Haruki2c2db482008-12-16 15:31:15 -08002451 /* Tell the skb what kind of packet this is */
2452 skb->protocol = eth_type_trans(skb, dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002453
Dai Haruki2c2db482008-12-16 15:31:15 -08002454 /* Send the packet up the stack */
2455 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2456 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2457 else
2458 ret = netif_receive_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459
Dai Haruki2c2db482008-12-16 15:31:15 -08002460 if (NET_RX_DROP == ret)
2461 priv->extra_stats.kernel_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462
2463 return 0;
2464}
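/*
 * A rough sketch of the rx buffer layout that the skb_pull() above relies
 * on, inferred from how the caller computes amount_pull (GMAC_FCB_LEN when
 * the FCB is in use, plus priv->padding):
 *
 *   skb->data --> | rxfcb (GMAC_FCB_LEN) | priv->padding | Ethernet frame |
 *
 * Pulling amount_pull bytes therefore leaves skb->data at the Ethernet
 * header, which is what eth_type_trans() expects.
 */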
2465
2466/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Kumar Gala0bbaf062005-06-20 10:54:21 -05002467 * until the budget/quota has been reached. Returns the number
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 * of frames handled
2469 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002470int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002472 struct net_device *dev = rx_queue->dev;
Andy Fleming31de1982008-12-16 15:33:40 -08002473 struct rxbd8 *bdp, *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 struct sk_buff *skb;
Dai Haruki2c2db482008-12-16 15:31:15 -08002475 int pkt_len;
2476 int amount_pull;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 int howmany = 0;
2478 struct gfar_private *priv = netdev_priv(dev);
2479
2480 /* Get the first full descriptor */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002481 bdp = rx_queue->cur_rx;
2482 base = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Dai Haruki2c2db482008-12-16 15:31:15 -08002484 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
2485 priv->padding;
2486
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002488 struct sk_buff *newskb;
Scott Wood3b6330c2007-05-16 15:06:59 -05002489 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05002490
2491 /* Add another skb for the future */
2492 newskb = gfar_new_skb(dev);
2493
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002494 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
Kumar Gala48268572009-03-18 23:28:22 -07002496 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
Andy Fleming81183052008-11-12 10:07:11 -06002497 priv->rx_buffer_size, DMA_FROM_DEVICE);
2498
Andy Fleming815b97c2008-04-22 17:18:29 -05002499 /* We drop the frame if we failed to allocate a new buffer */
2500 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2501 bdp->status & RXBD_ERR)) {
2502 count_errors(bdp->status, dev);
2503
2504 if (unlikely(!newskb))
2505 newskb = skb;
Lennert Buytenhek4e2fd552009-05-25 00:42:34 -07002506 else if (skb) {
2507 /*
2508 * We need to reset ->data to what it
2509 * was before gfar_new_skb() re-aligned
2510 * it to an RXBUF_ALIGNMENT boundary
2511 * before we put the skb back on the
2512 * recycle list.
2513 */
2514 skb->data = skb->head + NET_SKB_PAD;
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002515 __skb_queue_head(&priv->rx_recycle, skb);
Lennert Buytenhek4e2fd552009-05-25 00:42:34 -07002516 }
Andy Fleming815b97c2008-04-22 17:18:29 -05002517 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 /* Increment the number of packets */
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002519 dev->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 howmany++;
2521
Dai Haruki2c2db482008-12-16 15:31:15 -08002522 if (likely(skb)) {
2523 pkt_len = bdp->length - ETH_FCS_LEN;
2524 /* Remove the FCS from the packet length */
2525 skb_put(skb, pkt_len);
2526 dev->stats.rx_bytes += pkt_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527
Dai Haruki2c2db482008-12-16 15:31:15 -08002528 gfar_process_frame(dev, skb, amount_pull);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529
Dai Haruki2c2db482008-12-16 15:31:15 -08002530 } else {
2531 if (netif_msg_rx_err(priv))
2532 printk(KERN_WARNING
2533 "%s: Missing skb!\n", dev->name);
2534 dev->stats.rx_dropped++;
2535 priv->extra_stats.rx_skbmissing++;
2536 }
2537
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 }
2539
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002540 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541
Andy Fleming815b97c2008-04-22 17:18:29 -05002542 /* Setup the new bdp */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002543 gfar_new_rxbdp(rx_queue, bdp, newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544
2545 /* Update to the next pointer */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002546 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547
2548 /* update to point at the next skb */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002549 rx_queue->skb_currx =
2550 (rx_queue->skb_currx + 1) &
2551 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 }
2553
2554 /* Update the current rxbd pointer to be the next one */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002555 rx_queue->cur_rx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 return howmany;
2558}
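/*
 * A worked example of the skb_currx update in the loop above, assuming
 * RX_RING_MOD_MASK(size) expands to (size - 1) and a power-of-two ring
 * size (values here are purely illustrative):
 *
 *   rx_ring_size = 256  ->  mask = 0xff
 *   skb_currx = 254  ->  (254 + 1) & 0xff = 255
 *   skb_currx = 255  ->  (255 + 1) & 0xff = 0   (wraps back to entry 0)
 *
 * The descriptor pointer itself wraps separately through next_bd(), which
 * is passed base and rx_ring_size for exactly that purpose.
 */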
2559
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002560static int gfar_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002562 struct gfar_priv_grp *gfargrp = container_of(napi,
2563 struct gfar_priv_grp, napi);
2564 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002565 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002566 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002567 struct gfar_priv_rx_q *rx_queue = NULL;
2568 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00002569 int tx_cleaned = 0, i, left_over_budget = budget;
2570 unsigned long serviced_queues = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002571 int num_queues = 0;
Dai Harukid080cd62008-04-09 19:37:51 -05002572
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002573 num_queues = gfargrp->num_rx_queues;
2574 budget_per_queue = budget/num_queues;
2575
Dai Haruki8c7396a2008-12-17 16:52:00 -08002576	/* Clear IEVENT, so rx/tx interrupts aren't fired again
2577	 * for the packets that have already arrived */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002578 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002579
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002580 while (num_queues && left_over_budget) {
2581
2582 budget_per_queue = left_over_budget/num_queues;
2583 left_over_budget = 0;
2584
2585 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2586 if (test_bit(i, &serviced_queues))
2587 continue;
2588 rx_queue = priv->rx_queue[i];
2589 tx_queue = priv->tx_queue[rx_queue->qindex];
2590
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002591 tx_cleaned += gfar_clean_tx_ring(tx_queue);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002592 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2593 budget_per_queue);
2594 rx_cleaned += rx_cleaned_per_queue;
2595 			if (rx_cleaned_per_queue < budget_per_queue) {
2596 left_over_budget = left_over_budget +
2597 (budget_per_queue - rx_cleaned_per_queue);
2598 set_bit(i, &serviced_queues);
2599 num_queues--;
2600 }
2601 }
Dai Harukid080cd62008-04-09 19:37:51 -05002602 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603
Andy Fleming42199882008-12-17 16:52:30 -08002604 if (tx_cleaned)
2605 return budget;
2606
2607 if (rx_cleaned < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002608 napi_complete(napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
2610 /* Clear the halt bit in RSTAT */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002611 gfar_write(&regs->rstat, gfargrp->rstat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002613 gfar_write(&regs->imask, IMASK_DEFAULT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
2615 /* If we are coalescing interrupts, update the timer */
2616 /* Otherwise, clear it */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002617 gfar_configure_coalescing(priv,
2618 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 }
2620
Andy Fleming42199882008-12-17 16:52:30 -08002621 return rx_cleaned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622}
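/*
 * A numeric sketch of the budget redistribution loop in gfar_poll() above,
 * using made-up values (two rx queues in the group, budget = 64):
 *
 *   pass 1: budget_per_queue = 64 / 2 = 32
 *           queue A cleans all 32  -> not marked serviced
 *           queue B cleans 10      -> left_over_budget += 22,
 *                                     B marked serviced, num_queues = 1
 *   pass 2: budget_per_queue = 22 / 1 = 22, offered to queue A again
 *
 * The loop stops once every queue is serviced or nothing is left over.
 */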
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002624#ifdef CONFIG_NET_POLL_CONTROLLER
2625/*
2626 * Polling 'interrupt' - used by things like netconsole to send skbs
2627 * without having to re-enable interrupts. It's not called while
2628 * the interrupt routine is executing.
2629 */
2630static void gfar_netpoll(struct net_device *dev)
2631{
2632 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002633 int i = 0;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002634
2635 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002636 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002637 for (i = 0; i < priv->num_grps; i++) {
2638 disable_irq(priv->gfargrp[i].interruptTransmit);
2639 disable_irq(priv->gfargrp[i].interruptReceive);
2640 disable_irq(priv->gfargrp[i].interruptError);
2641 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2642 &priv->gfargrp[i]);
2643 enable_irq(priv->gfargrp[i].interruptError);
2644 enable_irq(priv->gfargrp[i].interruptReceive);
2645 enable_irq(priv->gfargrp[i].interruptTransmit);
2646 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002647 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002648 for (i = 0; i < priv->num_grps; i++) {
2649 disable_irq(priv->gfargrp[i].interruptTransmit);
2650 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2651 &priv->gfargrp[i]);
2652 enable_irq(priv->gfargrp[i].interruptTransmit);
Anton Vorontsov43de0042009-12-09 02:52:19 -08002653 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002654 }
2655}
2656#endif
2657
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002659static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002661 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662
2663 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002664 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002667 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002668 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
2670 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002671 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002672 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002674 /* Check for errors */
2675 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002676 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677
2678 return IRQ_HANDLED;
2679}
2680
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681/* Called every time the controller might need to be made
2682 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002683 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 * function converts those variables into the appropriate
2685 * register values, and can bring down the device if needed.
2686 */
2687static void adjust_link(struct net_device *dev)
2688{
2689 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002690 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002691 unsigned long flags;
2692 struct phy_device *phydev = priv->phydev;
2693 int new_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002695 local_irq_save(flags);
2696 lock_tx_qs(priv);
2697
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002698 if (phydev->link) {
2699 u32 tempval = gfar_read(&regs->maccfg2);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002700 u32 ecntrl = gfar_read(&regs->ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002701
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 /* Now we make sure that we can be in full duplex mode.
2703 * If not, we operate in half-duplex mode. */
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002704 if (phydev->duplex != priv->oldduplex) {
2705 new_state = 1;
2706 if (!(phydev->duplex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 tempval &= ~(MACCFG2_FULL_DUPLEX);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002708 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 tempval |= MACCFG2_FULL_DUPLEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002711 priv->oldduplex = phydev->duplex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 }
2713
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002714 if (phydev->speed != priv->oldspeed) {
2715 new_state = 1;
2716 switch (phydev->speed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 case 1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 tempval =
2719 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
Li Yangf430e492009-01-06 14:08:10 -08002720
2721 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 break;
2723 case 100:
2724 case 10:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 tempval =
2726 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002727
2728 /* Reduced mode distinguishes
2729 * between 10 and 100 */
2730 if (phydev->speed == SPEED_100)
2731 ecntrl |= ECNTRL_R100;
2732 else
2733 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 break;
2735 default:
Kumar Gala0bbaf062005-06-20 10:54:21 -05002736 if (netif_msg_link(priv))
2737 printk(KERN_WARNING
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002738 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
2739 dev->name, phydev->speed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 break;
2741 }
2742
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002743 priv->oldspeed = phydev->speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 }
2745
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002746 gfar_write(&regs->maccfg2, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002747 gfar_write(&regs->ecntrl, ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002748
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 if (!priv->oldlink) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002750 new_state = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 priv->oldlink = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 }
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002753 } else if (priv->oldlink) {
2754 new_state = 1;
2755 priv->oldlink = 0;
2756 priv->oldspeed = 0;
2757 priv->oldduplex = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002760 if (new_state && netif_msg_link(priv))
2761 phy_print_status(phydev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002762 unlock_tx_qs(priv);
2763 local_irq_restore(flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002764}
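/*
 * The register updates performed above, summarized from the switch on
 * phydev->speed (MACCFG2_IF masks the interface-mode bits of MACCFG2):
 *
 *   1000 Mbit: MACCFG2 I/F = GMII, ECNTRL_R100 cleared
 *    100 Mbit: MACCFG2 I/F = MII,  ECNTRL_R100 set
 *     10 Mbit: MACCFG2 I/F = MII,  ECNTRL_R100 cleared
 *
 * with MACCFG2_FULL_DUPLEX set or cleared according to phydev->duplex.
 */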
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
2766/* Update the hash table based on the current list of multicast
2767 * addresses we subscribe to. Also, change the promiscuity of
2768 * the device based on the flags (this function is called
2769 * whenever dev->flags is changed) */
2770static void gfar_set_multi(struct net_device *dev)
2771{
2772 struct dev_mc_list *mc_ptr;
2773 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002774 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 u32 tempval;
2776
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002777 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 /* Set RCTRL to PROM */
2779 tempval = gfar_read(&regs->rctrl);
2780 tempval |= RCTRL_PROM;
2781 gfar_write(&regs->rctrl, tempval);
2782 } else {
2783 /* Set RCTRL to not PROM */
2784 tempval = gfar_read(&regs->rctrl);
2785 tempval &= ~(RCTRL_PROM);
2786 gfar_write(&regs->rctrl, tempval);
2787 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002788
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002789 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002791 gfar_write(&regs->igaddr0, 0xffffffff);
2792 gfar_write(&regs->igaddr1, 0xffffffff);
2793 gfar_write(&regs->igaddr2, 0xffffffff);
2794 gfar_write(&regs->igaddr3, 0xffffffff);
2795 gfar_write(&regs->igaddr4, 0xffffffff);
2796 gfar_write(&regs->igaddr5, 0xffffffff);
2797 gfar_write(&regs->igaddr6, 0xffffffff);
2798 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 gfar_write(&regs->gaddr0, 0xffffffff);
2800 gfar_write(&regs->gaddr1, 0xffffffff);
2801 gfar_write(&regs->gaddr2, 0xffffffff);
2802 gfar_write(&regs->gaddr3, 0xffffffff);
2803 gfar_write(&regs->gaddr4, 0xffffffff);
2804 gfar_write(&regs->gaddr5, 0xffffffff);
2805 gfar_write(&regs->gaddr6, 0xffffffff);
2806 gfar_write(&regs->gaddr7, 0xffffffff);
2807 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002808 int em_num;
2809 int idx;
2810
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002812 gfar_write(&regs->igaddr0, 0x0);
2813 gfar_write(&regs->igaddr1, 0x0);
2814 gfar_write(&regs->igaddr2, 0x0);
2815 gfar_write(&regs->igaddr3, 0x0);
2816 gfar_write(&regs->igaddr4, 0x0);
2817 gfar_write(&regs->igaddr5, 0x0);
2818 gfar_write(&regs->igaddr6, 0x0);
2819 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 gfar_write(&regs->gaddr0, 0x0);
2821 gfar_write(&regs->gaddr1, 0x0);
2822 gfar_write(&regs->gaddr2, 0x0);
2823 gfar_write(&regs->gaddr3, 0x0);
2824 gfar_write(&regs->gaddr4, 0x0);
2825 gfar_write(&regs->gaddr5, 0x0);
2826 gfar_write(&regs->gaddr6, 0x0);
2827 gfar_write(&regs->gaddr7, 0x0);
2828
Andy Fleming7f7f5312005-11-11 12:38:59 -06002829 /* If we have extended hash tables, we need to
2830 * clear the exact match registers to prepare for
2831 * setting them */
2832 if (priv->extended_hash) {
2833 em_num = GFAR_EM_NUM + 1;
2834 gfar_clear_exact_match(dev);
2835 idx = 1;
2836 } else {
2837 idx = 0;
2838 em_num = 0;
2839 }
2840
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002841 if (dev->mc_count == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 return;
2843
2844 /* Parse the list, and set the appropriate bits */
2845 		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002846 if (idx < em_num) {
2847 gfar_set_mac_for_addr(dev, idx,
2848 mc_ptr->dmi_addr);
2849 idx++;
2850 } else
2851 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
2853 }
2854
2855 return;
2856}
2857
Andy Fleming7f7f5312005-11-11 12:38:59 -06002858
2859/* Clears each of the exact match registers to zero, so they
2860 * don't interfere with normal reception */
2861static void gfar_clear_exact_match(struct net_device *dev)
2862{
2863 int idx;
2864 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
2865
2866 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2867 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2868}
2869
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870/* Set the appropriate hash bit for the given addr */
2871/* The algorithm works like so:
2872 * 1) Take the Destination Address (ie the multicast address), and
2873 * do a CRC on it (little endian), and reverse the bits of the
2874 * result.
2875 * 2) Use the 8 most significant bits as a hash into a 256-entry
2876 * table. The table is controlled through 8 32-bit registers:
2877 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
2878 * entry 255. This means that the 3 most significant bits of the
2879 * hash select which gaddr register to use, and the 5 other bits
2880 * indicate which bit (assuming an IBM numbering scheme, which
2881 * for PowerPC (tm) is usually the case) in the register holds
2882 * the entry. */
2883static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2884{
2885 u32 tempval;
2886 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 u32 result = ether_crc(MAC_ADDR_LEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002888 int width = priv->hash_width;
2889 u8 whichbit = (result >> (32 - width)) & 0x1f;
2890 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 u32 value = (1 << (31-whichbit));
2892
Kumar Gala0bbaf062005-06-20 10:54:21 -05002893 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002895 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896
2897 return;
2898}
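/*
 * A worked example of the arithmetic above, with an assumed hash_width of
 * 8 and a made-up CRC value (both purely illustrative):
 *
 *   result   = 0xb6000000
 *   hash     = result >> (32 - 8)      = 0xb6 = 0b10110110
 *   whichreg = result >> (32 - 8 + 5)  = 0b101   = 5
 *   whichbit = hash & 0x1f             = 0b10110 = 22
 *   value    = 1 << (31 - 22)          = bit 9 of the register
 *
 * i.e. bit 22 (counting from the MSB, IBM style) of hash_regs[5] gets set.
 */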
2899
Andy Fleming7f7f5312005-11-11 12:38:59 -06002900
2901/* There are multiple MAC Address register pairs on some controllers
2902 * This function sets the numth pair to a given address
2903 */
2904static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2905{
2906 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002907 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002908 int idx;
2909 char tmpbuf[MAC_ADDR_LEN];
2910 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002911 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002912
2913 macptr += num*2;
2914
2915 /* Now copy it into the mac registers backwards, cuz */
2916 /* little endian is silly */
2917 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
2918 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
2919
2920 gfar_write(macptr, *((u32 *) (tmpbuf)));
2921
2922 tempval = *((u32 *) (tmpbuf + 4));
2923
2924 gfar_write(macptr+1, tempval);
2925}
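/*
 * What the byte reversal above produces for a made-up address
 * 00:04:9f:01:02:03:
 *
 *   addr[]   = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 }
 *   tmpbuf[] = { 0x03, 0x02, 0x01, 0x9f, 0x04, 0x00 }
 *
 * The first word of tmpbuf is written to macptr (macstnaddr1 for num == 0,
 * advancing two registers per pair) and the second word to macptr + 1, so
 * the hardware sees the station address bytes in reversed order.
 */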
2926
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002928static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002930 struct gfar_priv_grp *gfargrp = grp_id;
2931 struct gfar __iomem *regs = gfargrp->regs;
2932	struct gfar_private *priv = gfargrp->priv;
2933 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934
2935 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002936 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937
2938 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002939 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05002940
2941 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002942 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05002943 (events & IEVENT_MAG))
2944 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
2946 /* Hmm... */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002947 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2948 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002949 dev->name, events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950
2951 /* Update the error counters */
2952 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002953 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
2955 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002956 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002958 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 if (events & IEVENT_XFUN) {
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00002960 unsigned long flags;
2961
Kumar Gala0bbaf062005-06-20 10:54:21 -05002962 if (netif_msg_tx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002963 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2964 "packet dropped.\n", dev->name);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002965 dev->stats.tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 priv->extra_stats.tx_underrun++;
2967
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00002968 local_irq_save(flags);
2969 lock_tx_qs(priv);
2970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 /* Reactivate the Tx Queues */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002972 gfar_write(&regs->tstat, gfargrp->tstat);
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00002973
2974 unlock_tx_qs(priv);
2975 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002977 if (netif_msg_tx_err(priv))
2978 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 }
2980 if (events & IEVENT_BSY) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002981 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 priv->extra_stats.rx_bsy++;
2983
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002984 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985
Kumar Gala0bbaf062005-06-20 10:54:21 -05002986 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002987 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002988 dev->name, gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 }
2990 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002991 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 priv->extra_stats.rx_babr++;
2993
Kumar Gala0bbaf062005-06-20 10:54:21 -05002994 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002995 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 }
2997 if (events & IEVENT_EBERR) {
2998 priv->extra_stats.eberr++;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002999 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003000 printk(KERN_DEBUG "%s: bus error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05003002 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003003 printk(KERN_DEBUG "%s: control frame\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004
3005 if (events & IEVENT_BABT) {
3006 priv->extra_stats.tx_babt++;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003007 if (netif_msg_tx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003008 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 }
3010 return IRQ_HANDLED;
3011}
3012
Andy Flemingb31a1d82008-12-16 15:29:15 -08003013static struct of_device_id gfar_match[] =
3014{
3015 {
3016 .type = "network",
3017 .compatible = "gianfar",
3018 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003019 {
3020 .compatible = "fsl,etsec2",
3021 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003022 {},
3023};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003024MODULE_DEVICE_TABLE(of, gfar_match);
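/*
 * A minimal sketch of a device tree node this match table would bind to.
 * Only device_type and compatible come from gfar_match above; the other
 * properties and all values are placeholders, not taken from any real
 * board file:
 *
 *   ethernet@24000 {
 *           device_type = "network";
 *           compatible = "gianfar";
 *           reg = <0x24000 0x1000>;
 *           interrupts = <29 2 30 2 34 2>;
 *           phy-handle = <&phy0>;
 *   };
 */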
Andy Flemingb31a1d82008-12-16 15:29:15 -08003025
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026/* Structure for a device driver */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003027static struct of_platform_driver gfar_driver = {
3028 .name = "fsl-gianfar",
3029 .match_table = gfar_match,
3030
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 .probe = gfar_probe,
3032 .remove = gfar_remove,
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00003033 .suspend = gfar_legacy_suspend,
3034 .resume = gfar_legacy_resume,
3035 .driver.pm = GFAR_PM_OPS,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036};
3037
3038static int __init gfar_init(void)
3039{
Andy Fleming1577ece2009-02-04 16:42:12 -08003040 return of_register_platform_driver(&gfar_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041}
3042
3043static void __exit gfar_exit(void)
3044{
Andy Flemingb31a1d82008-12-16 15:29:15 -08003045 of_unregister_platform_driver(&gfar_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046}
3047
3048module_init(gfar_init);
3049module_exit(gfar_exit);
3050