/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or a set amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

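/* (Re)initialize all TX and RX buffer descriptor rings: reset the
 * per-queue ring pointers, clear every TX BD, and make sure every
 * RX BD has an skb attached (allocating new ones where needed). */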
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

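/* Allocate one coherent DMA region holding all TX descriptor rings
 * followed by all RX descriptor rings, hand each queue its slice,
 * allocate the per-queue skb pointer arrays, then initialize the BDs. */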
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

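/* Program each TX/RX queue's descriptor ring base address into the
 * controller's TBASEn/RBASEn registers (the per-queue base registers
 * are spaced two 32-bit words apart, hence baddr += 2). */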
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

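/* Program the MAC-level receive/transmit controls (RCTRL/TCTRL),
 * stashing attributes, and FIFO thresholds from the current settings
 * held in gfar_private. */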
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable)
		rctrl |= RCTRL_FILREN;

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_select_queue = gfar_select_queue,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_get_queue_mapping(skb);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

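/* Parse one interrupt group from the device tree: map its register
 * block, pick up its interrupt lines, and record which RX/TX queues
 * it serves via the fsl,rx-bit-map / fsl,tx-bit-map properties
 * (multi-group mode is only selected for "fsl,etsec2" controllers). */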
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;
	u64 addr, size;

	addr = of_translate_address(np,
			of_get_address(np, 0, &size, NULL));
	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);

	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
				sizeof(struct gfar_priv_tx_q), GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
				sizeof(struct gfar_priv_rx_q), GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

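/* Mirror the lowest max_qs bits of bit_map, so the MSB (which names
 * queue 0 in the device tree) ends up at bit position 0. */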
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

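/* Install the filer rules that steer one traffic class (e.g. IPv4/TCP)
 * into its own cluster, writing entries downward from rqfar and
 * returning the next free rule index. */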
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

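/* Program a default RX filer table: a catch-all match rule, cluster
 * entries for the IPv6/IPv4 TCP and UDP classes, and no-match entries
 * for every remaining slot. */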
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register NAPI for each interrupt group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0, but
	 * for_each_bit() parses from right to left, which effectively
	 * reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
				len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
				len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

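/* Power management: halt DMA and the MAC on suspend. If wake-on-LAN
 * via magic packet is configured, RX stays enabled and the magic-packet
 * interrupt is armed instead of stopping the PHY. */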
1132static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001133{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001134 struct gfar_private *priv = dev_get_drvdata(dev);
1135 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001136 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001137 unsigned long flags;
1138 u32 tempval;
1139
1140 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001141 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001142
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001143 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001144
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001145 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001146
1147 local_irq_save(flags);
1148 lock_tx_qs(priv);
1149 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001150
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001151 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001152
1153 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001154 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001155
1156 tempval &= ~MACCFG1_TX_EN;
1157
1158 if (!magic_packet)
1159 tempval &= ~MACCFG1_RX_EN;
1160
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001161 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001162
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001163 unlock_rx_qs(priv);
1164 unlock_tx_qs(priv);
1165 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001166
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001167 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001168
1169 if (magic_packet) {
1170 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001171 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001172
1173 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001174 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001175 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001176 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001177 } else {
1178 phy_stop(priv->phydev);
1179 }
1180 }
1181
1182 return 0;
1183}
1184
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001185static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001186{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001187 struct gfar_private *priv = dev_get_drvdata(dev);
1188 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001189 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001190 unsigned long flags;
1191 u32 tempval;
1192 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001193 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001194
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001195 if (!netif_running(ndev)) {
1196 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001197 return 0;
1198 }
1199
1200 if (!magic_packet && priv->phydev)
1201 phy_start(priv->phydev);
1202
1203 /* Disable Magic Packet mode, in case something
1204 * else woke us up.
1205 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001206 local_irq_save(flags);
1207 lock_tx_qs(priv);
1208 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001209
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001210 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001211 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001212 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001213
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001214 gfar_start(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001215
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001216 unlock_rx_qs(priv);
1217 unlock_tx_qs(priv);
1218 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001219
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001220 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001221
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001222 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001223
1224 return 0;
1225}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001226
1227static int gfar_restore(struct device *dev)
1228{
1229 struct gfar_private *priv = dev_get_drvdata(dev);
1230 struct net_device *ndev = priv->ndev;
1231
1232 if (!netif_running(ndev))
1233 return 0;
1234
1235 gfar_init_bds(ndev);
1236 init_registers(ndev);
1237 gfar_set_mac_address(ndev);
1238 gfar_init_mac(ndev);
1239 gfar_start(ndev);
1240
1241 priv->oldlink = 0;
1242 priv->oldspeed = 0;
1243 priv->oldduplex = -1;
1244
1245 if (priv->phydev)
1246 phy_start(priv->phydev);
1247
1248 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001249 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001250
1251 return 0;
1252}
1253
1254static struct dev_pm_ops gfar_pm_ops = {
1255 .suspend = gfar_suspend,
1256 .resume = gfar_resume,
1257 .freeze = gfar_suspend,
1258 .thaw = gfar_resume,
1259 .restore = gfar_restore,
1260};
1261
1262#define GFAR_PM_OPS (&gfar_pm_ops)
1263
1264static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1265{
1266 return gfar_suspend(&ofdev->dev);
1267}
1268
1269static int gfar_legacy_resume(struct of_device *ofdev)
1270{
1271 return gfar_resume(&ofdev->dev);
1272}
1273
Scott Woodd87eb122008-07-11 18:04:45 -05001274#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001275
1276#define GFAR_PM_OPS NULL
1277#define gfar_legacy_suspend NULL
1278#define gfar_legacy_resume NULL
1279
Scott Woodd87eb122008-07-11 18:04:45 -05001280#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001282/* Reads the controller's registers to determine what interface
1283 * connects it to the PHY.
1284 */
1285static phy_interface_t gfar_get_interface(struct net_device *dev)
1286{
1287 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001288 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001289 u32 ecntrl;
1290
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001291 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001292
1293 if (ecntrl & ECNTRL_SGMII_MODE)
1294 return PHY_INTERFACE_MODE_SGMII;
1295
1296 if (ecntrl & ECNTRL_TBI_MODE) {
1297 if (ecntrl & ECNTRL_REDUCED_MODE)
1298 return PHY_INTERFACE_MODE_RTBI;
1299 else
1300 return PHY_INTERFACE_MODE_TBI;
1301 }
1302
1303 if (ecntrl & ECNTRL_REDUCED_MODE) {
1304 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1305 return PHY_INTERFACE_MODE_RMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001306 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001307 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001308
1309 /*
1310 * This isn't autodetected right now, so it must
1311 * be set by the device tree or platform code.
1312 */
1313 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1314 return PHY_INTERFACE_MODE_RGMII_ID;
1315
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001316 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001317 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001318 }
1319
Andy Flemingb31a1d82008-12-16 15:29:15 -08001320 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001321 return PHY_INTERFACE_MODE_GMII;
1322
1323 return PHY_INTERFACE_MODE_MII;
1324}
1325
1326
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001327/* Initializes driver's PHY state, and attaches to the PHY.
1328 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 */
1330static int init_phy(struct net_device *dev)
1331{
1332 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001333 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001334 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001335 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001336 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 priv->oldlink = 0;
1339 priv->oldspeed = 0;
1340 priv->oldduplex = -1;
1341
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001342 interface = gfar_get_interface(dev);
1343
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001344 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1345 interface);
1346 if (!priv->phydev)
1347 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1348 interface);
1349 if (!priv->phydev) {
1350 dev_err(&dev->dev, "could not attach to PHY\n");
1351 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001352 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
Kapil Junejad3c12872007-05-11 18:25:11 -05001354 if (interface == PHY_INTERFACE_MODE_SGMII)
1355 gfar_configure_serdes(dev);
1356
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001357 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001358 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1359 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362}
1363
Paul Gortmakerd0313582008-04-17 00:08:10 -04001364/*
1365 * Initialize TBI PHY interface for communicating with the
1366 * SERDES lynx PHY on the chip. We communicate with this PHY
1367 * through the MDIO bus on each controller, treating it as a
1368 * "normal" PHY at the address found in the TBIPA register. We assume
1369 * that the TBIPA register is valid. Either the MDIO bus code will set
1370 * it to a value that doesn't conflict with other PHYs on the bus, or the
1371 * value doesn't matter, as there are no other PHYs on the bus.
1372 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001373static void gfar_configure_serdes(struct net_device *dev)
1374{
1375 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001376 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001377
Grant Likelyfe192a42009-04-25 12:53:12 +00001378 if (!priv->tbi_node) {
1379 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1380 "device tree specify a tbi-handle\n");
1381 return;
1382 }
1383
1384 tbiphy = of_phy_find_device(priv->tbi_node);
1385 if (!tbiphy) {
1386 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001387 return;
1388 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001389
Andy Flemingb31a1d82008-12-16 15:29:15 -08001390 /*
1391 * If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001392 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1393 * everything for us? Resetting it takes the link down and requires
1394 * several seconds for it to come back.
1395 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001396 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001397 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001398
Paul Gortmakerd0313582008-04-17 00:08:10 -04001399 /* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001400 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001401
Grant Likelyfe192a42009-04-25 12:53:12 +00001402 phy_write(tbiphy, MII_ADVERTISE,
Kapil Junejad3c12872007-05-11 18:25:11 -05001403 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1404 ADVERTISE_1000XPSE_ASYM);
1405
Grant Likelyfe192a42009-04-25 12:53:12 +00001406 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
Kapil Junejad3c12872007-05-11 18:25:11 -05001407 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1408}
1409
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410static void init_registers(struct net_device *dev)
1411{
1412 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001413 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001414 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001416 for (i = 0; i < priv->num_grps; i++) {
1417 regs = priv->gfargrp[i].regs;
1418 /* Clear IEVENT */
1419 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001421 /* Initialize IMASK */
1422 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1423 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001425 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001427 gfar_write(&regs->igaddr0, 0);
1428 gfar_write(&regs->igaddr1, 0);
1429 gfar_write(&regs->igaddr2, 0);
1430 gfar_write(&regs->igaddr3, 0);
1431 gfar_write(&regs->igaddr4, 0);
1432 gfar_write(&regs->igaddr5, 0);
1433 gfar_write(&regs->igaddr6, 0);
1434 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001436 gfar_write(&regs->gaddr0, 0);
1437 gfar_write(&regs->gaddr1, 0);
1438 gfar_write(&regs->gaddr2, 0);
1439 gfar_write(&regs->gaddr3, 0);
1440 gfar_write(&regs->gaddr4, 0);
1441 gfar_write(&regs->gaddr5, 0);
1442 gfar_write(&regs->gaddr6, 0);
1443 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001446 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001447 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
1449 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001450 gfar_write(&regs->rmon.cam1, 0xffffffff);
1451 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 }
1453
1454 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001455 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001458 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459}
1460
Kumar Gala0bbaf062005-06-20 10:54:21 -05001461
1462/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001463static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464{
1465 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001466 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001468 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001470 for (i = 0; i < priv->num_grps; i++) {
1471 regs = priv->gfargrp[i].regs;
1472 /* Mask all interrupts */
1473 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001475 /* Clear all interrupts */
1476 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1477 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001479 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001481 tempval = gfar_read(&regs->dmactrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1483 != (DMACTRL_GRS | DMACTRL_GTS)) {
1484 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001485 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001487 while (!(gfar_read(&regs->ievent) &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 (IEVENT_GRSC | IEVENT_GTSC)))
1489 cpu_relax();
1490 }
Scott Woodd87eb122008-07-11 18:04:45 -05001491}
Scott Woodd87eb122008-07-11 18:04:45 -05001492
1493/* Halt the receive and transmit queues */
1494void gfar_halt(struct net_device *dev)
1495{
1496 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001497 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001498 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
Scott Wood2a54adc2008-08-12 15:10:46 -05001500 gfar_halt_nodisable(dev);
1501
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 /* Disable Rx and Tx */
1503 tempval = gfar_read(&regs->maccfg1);
1504 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1505 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001506}
1507
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001508static void free_grp_irqs(struct gfar_priv_grp *grp)
1509{
1510 free_irq(grp->interruptError, grp);
1511 free_irq(grp->interruptTransmit, grp);
1512 free_irq(grp->interruptReceive, grp);
1513}
1514
Kumar Gala0bbaf062005-06-20 10:54:21 -05001515void stop_gfar(struct net_device *dev)
1516{
1517 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001518 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001519 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001520
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001521 phy_stop(priv->phydev);
1522
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001523
Kumar Gala0bbaf062005-06-20 10:54:21 -05001524 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001525 local_irq_save(flags);
1526 lock_tx_qs(priv);
1527 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001528
Kumar Gala0bbaf062005-06-20 10:54:21 -05001529 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001531 unlock_rx_qs(priv);
1532 unlock_tx_qs(priv);
1533 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
1535 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001536 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001537 for (i = 0; i < priv->num_grps; i++)
1538 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001540 for (i = 0; i < priv->num_grps; i++)
1541 free_irq(priv->gfargrp[i].interruptTransmit,
1542 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 }
1544
1545 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001548static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001551 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001552 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001554 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001556 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1557 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001558 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Kumar Gala48268572009-03-18 23:28:22 -07001560 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001561 txbdp->length, DMA_TO_DEVICE);
1562 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001563 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1564 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001565 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001566 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001567 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001569 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001570 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1571 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001573 kfree(tx_queue->tx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001574}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001576static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1577{
1578 struct rxbd8 *rxbdp;
1579 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1580 int i;
1581
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001582 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001584 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1585 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001586 dma_unmap_single(&priv->ofdev->dev,
1587 rxbdp->bufPtr, priv->rx_buffer_size,
Anton Vorontsove69edd22009-10-12 06:00:30 +00001588 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001589 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1590 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001592 rxbdp->lstatus = 0;
1593 rxbdp->bufPtr = 0;
1594 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001596 kfree(rx_queue->rx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001597}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001598
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001599/* If there are any tx skbs or rx skbs still around, free them.
1600 * Then free tx_skbuff and rx_skbuff */
1601static void free_skb_resources(struct gfar_private *priv)
1602{
1603 struct gfar_priv_tx_q *tx_queue = NULL;
1604 struct gfar_priv_rx_q *rx_queue = NULL;
1605 int i;
1606
1607 /* Go through all the buffer descriptors and free their data buffers */
1608 for (i = 0; i < priv->num_tx_queues; i++) {
1609 tx_queue = priv->tx_queue[i];
1610 if (tx_queue->tx_skbuff)
1611 free_skb_tx_queue(tx_queue);
1612 }
1613
1614 for (i = 0; i < priv->num_rx_queues; i++) {
1615 rx_queue = priv->rx_queue[i];
1616 if (rx_queue->rx_skbuff)
1617 free_skb_rx_queue(rx_queue);
1618 }
1619
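/*
 * All tx and rx BD rings were carved out of one dma_alloc_coherent()
 * block by gfar_alloc_skb_resources() (tx rings first, then rx rings
 * back to back), so a single free of the summed sizes, anchored at the
 * first tx queue's base, returns the whole region.
 */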
1620 dma_free_coherent(&priv->ofdev->dev,
1621 sizeof(struct txbd8) * priv->total_tx_ring_size +
1622 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1623 priv->tx_queue[0]->tx_bd_base,
1624 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625}
1626
Kumar Gala0bbaf062005-06-20 10:54:21 -05001627void gfar_start(struct net_device *dev)
1628{
1629 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001630 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001631 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001632 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001633
1634 /* Enable Rx and Tx in MACCFG1 */
1635 tempval = gfar_read(&regs->maccfg1);
1636 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1637 gfar_write(&regs->maccfg1, tempval);
1638
1639 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001640 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001641 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001642 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001643
Kumar Gala0bbaf062005-06-20 10:54:21 -05001644 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001645 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001646 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001647 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001648
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001649 for (i = 0; i < priv->num_grps; i++) {
1650 regs = priv->gfargrp[i].regs;
1651 /* Clear THLT/RHLT, so that the DMA starts polling now */
1652 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1653 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1654 /* Unmask the interrupts we look for */
1655 gfar_write(&regs->imask, IMASK_DEFAULT);
1656 }
Dai Haruki12dea572008-12-16 15:30:20 -08001657
1658 dev->trans_start = jiffies;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001659}
1660
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001661void gfar_configure_coalescing(struct gfar_private *priv,
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001662 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001664 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001665 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001666 int i = 0;
1667
1668 /* Backward compatible case ---- even if we enable
1669 * multiple queues, there's only a single register to program
1670 */
1671 gfar_write(&regs->txic, 0);
1672 if (likely(priv->tx_queue[0]->txcoalescing))
1673 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1674
1675 gfar_write(&regs->rxic, 0);
1676 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1677 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1678
1679 if (priv->mode == MQ_MG_MODE) {
1680 baddr = &regs->txic0;
1681 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1682 if (likely(priv->tx_queue[i]->txcoalescing)) {
1683 gfar_write(baddr + i, 0);
1684 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1685 }
1686 }
1687
1688 baddr = &regs->rxic0;
1689 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1690 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1691 gfar_write(baddr + i, 0);
1692 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1693 }
1694 }
1695 }
1696}
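/*
 * The tx_mask/rx_mask arguments are bitmaps of queue indices.  The
 * legacy single txic/rxic pair on group 0 is always reprogrammed; the
 * masks only select which per-queue txic0.../rxic0... registers are
 * rewritten when the device runs in MQ_MG_MODE.  A sketch of a caller
 * refreshing coalescing for a single tx queue (qindex being a
 * hypothetical, valid tx queue index) would be:
 *
 *	gfar_configure_coalescing(priv, 1UL << qindex, 0);
 *
 * startup_gfar() below simply passes 0xFF for both masks to hit every
 * queue.
 */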
1697
1698static int register_grp_irqs(struct gfar_priv_grp *grp)
1699{
1700 struct gfar_private *priv = grp->priv;
1701 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001702 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 /* If the device has multiple interrupts, register for
1705 * them. Otherwise, only register for the one */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001706 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001707 /* Install our interrupt handlers for Error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 * Transmit, and Receive */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001709 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1710 grp->int_name_er,grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001711 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001712 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1713 dev->name, grp->interruptError);
1714
1715 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 }
1717
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001718 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1719 0, grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001720 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001721 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1722 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 goto tx_irq_fail;
1724 }
1725
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001726 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1727 grp->int_name_rx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001728 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001729 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1730 dev->name, grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 goto rx_irq_fail;
1732 }
1733 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001734 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1735 grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001736 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001737 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1738 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 goto err_irq_fail;
1740 }
1741 }
1742
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001743 return 0;
1744
1745rx_irq_fail:
1746 free_irq(grp->interruptTransmit, grp);
1747tx_irq_fail:
1748 free_irq(grp->interruptError, grp);
1749err_irq_fail:
1750 return err;
1751
1752}
1753
1754/* Bring the controller up and running */
1755int startup_gfar(struct net_device *ndev)
1756{
1757 struct gfar_private *priv = netdev_priv(ndev);
1758 struct gfar __iomem *regs = NULL;
1759 int err, i, j;
1760
1761 for (i = 0; i < priv->num_grps; i++) {
1762 regs = priv->gfargrp[i].regs;
1763 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1764 }
1765
1766 regs = priv->gfargrp[0].regs;
1767 err = gfar_alloc_skb_resources(ndev);
1768 if (err)
1769 return err;
1770
1771 gfar_init_mac(ndev);
1772
1773 for (i = 0; i < priv->num_grps; i++) {
1774 err = register_grp_irqs(&priv->gfargrp[i]);
1775 if (err) {
1776 for (j = 0; j < i; j++)
1777 free_grp_irqs(&priv->gfargrp[j]);
1778 goto irq_fail;
1779 }
1780 }
1781
Andy Fleming7f7f5312005-11-11 12:38:59 -06001782 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001783 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001785 phy_start(priv->phydev);
1786
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001787 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1788
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 return 0;
1790
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001791irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001792 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 return err;
1794}
1795
1796/* Called when something needs to use the ethernet device */
1797/* Returns 0 for success. */
1798static int gfar_enet_open(struct net_device *dev)
1799{
Li Yang94e8cc32007-10-12 21:53:51 +08001800 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 int err;
1802
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001803 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001804
Andy Fleming0fd56bb2009-02-04 16:43:16 -08001805 skb_queue_head_init(&priv->rx_recycle);
1806
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 /* Initialize a bunch of registers */
1808 init_registers(dev);
1809
1810 gfar_set_mac_address(dev);
1811
1812 err = init_phy(dev);
1813
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001814 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001815 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001817 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
1819 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001820 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001821 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001822 return err;
1823 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001825 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001827 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 return err;
1830}
1831
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001832static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001833{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001834 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001835
1836 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001837
Kumar Gala0bbaf062005-06-20 10:54:21 -05001838 return fcb;
1839}
1840
1841static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1842{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001843 u8 flags = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001844
1845 /* If we're here, it's an IP packet with a TCP or UDP
1846 * payload. We set it to checksum, using a pseudo-header
1847 * we provide
1848 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001849 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001850
Andy Fleming7f7f5312005-11-11 12:38:59 -06001851 /* Tell the controller what the protocol is */
1852 /* And provide the already calculated phcs */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001853 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001854 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03001855 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06001856 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05001857 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001858
1859 /* l3os is the distance between the start of the
1860 * frame (skb->data) and the start of the IP hdr.
1861 * l4os is the distance between the start of the
1862 * l3 hdr and the l4 hdr */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001863 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03001864 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001865
Andy Fleming7f7f5312005-11-11 12:38:59 -06001866 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001867}
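/*
 * A concrete (illustrative) example: for an untagged IPv4 frame with
 * the usual 14-byte Ethernet header and a 20-byte IP header without
 * options, gfar_add_fcb() has already pushed GMAC_FCB_LEN bytes in
 * front of the frame, so the code above ends up with l3os = 14 and
 * l4os = 20, while phcs carries the pseudo-header checksum the stack
 * left in the TCP/UDP check field.
 */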
1868
Andy Fleming7f7f5312005-11-11 12:38:59 -06001869inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001870{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001871 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001872 fcb->vlctl = vlan_tx_tag_get(skb);
1873}
1874
Dai Haruki4669bc92008-12-17 16:51:04 -08001875static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1876 struct txbd8 *base, int ring_size)
1877{
1878 struct txbd8 *new_bd = bdp + stride;
1879
1880 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1881}
1882
1883static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1884 int ring_size)
1885{
1886 return skip_txbd(bdp, 1, base, ring_size);
1887}
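/*
 * These helpers walk the descriptor ring without a modulo.  As a small
 * worked example, with a ring of tx_ring_size BDs starting at base,
 *
 *	bdp = next_txbd(base + tx_ring_size - 1, base, tx_ring_size);
 *
 * steps past the last descriptor and wraps, leaving bdp == base.
 * skip_txbd() does the same for an arbitrary stride, which is how
 * gfar_clean_tx_ring() below hops over a frame's fragment BDs.
 */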
1888
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889/* This is called by the kernel when a frame is ready for transmission. */
1890/* It is pointed to by the dev->hard_start_xmit function pointer */
1891static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1892{
1893 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001894 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001895 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001896 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001897 struct txfcb *fcb = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08001898 struct txbd8 *txbdp, *txbdp_start, *base;
Dai Haruki5a5efed2008-12-16 15:34:50 -08001899 u32 lstatus;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001900 int i, rq = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08001901 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05001902 unsigned long flags;
Dai Haruki4669bc92008-12-17 16:51:04 -08001903 unsigned int nr_frags, length;
1904
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001905
1906 rq = skb->queue_mapping;
1907 tx_queue = priv->tx_queue[rq];
1908 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001909 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001910 regs = tx_queue->grp->regs;
Dai Haruki4669bc92008-12-17 16:51:04 -08001911
Li Yang5b28bea2009-03-27 15:54:30 -07001912 /* make space for additional header when fcb is needed */
1913 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1914 (priv->vlgrp && vlan_tx_tag_present(skb))) &&
1915 (skb_headroom(skb) < GMAC_FCB_LEN)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001916 struct sk_buff *skb_new;
1917
1918 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
1919 if (!skb_new) {
1920 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07001921 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001922 return NETDEV_TX_OK;
1923 }
1924 kfree_skb(skb);
1925 skb = skb_new;
1926 }
1927
Dai Haruki4669bc92008-12-17 16:51:04 -08001928 /* total number of fragments in the SKB */
1929 nr_frags = skb_shinfo(skb)->nr_frags;
1930
Dai Haruki4669bc92008-12-17 16:51:04 -08001931 /* check if there is space to queue this packet */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001932 if ((nr_frags+1) > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001933 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001934 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08001935 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08001936 return NETDEV_TX_BUSY;
1937 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
1939 /* Update transmit stats */
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001940 dev->stats.tx_bytes += skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001942 txbdp = txbdp_start = tx_queue->cur_tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
Dai Haruki4669bc92008-12-17 16:51:04 -08001944 if (nr_frags == 0) {
1945 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1946 } else {
1947 /* Place the fragment addresses and lengths into the TxBDs */
1948 for (i = 0; i < nr_frags; i++) {
1949 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001950 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
Dai Haruki4669bc92008-12-17 16:51:04 -08001952 length = skb_shinfo(skb)->frags[i].size;
1953
1954 lstatus = txbdp->lstatus | length |
1955 BD_LFLAG(TXBD_READY);
1956
1957 /* Handle the last BD specially */
1958 if (i == nr_frags - 1)
1959 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1960
Kumar Gala48268572009-03-18 23:28:22 -07001961 bufaddr = dma_map_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08001962 skb_shinfo(skb)->frags[i].page,
1963 skb_shinfo(skb)->frags[i].page_offset,
1964 length,
1965 DMA_TO_DEVICE);
1966
1967 /* set the TxBD length and buffer pointer */
1968 txbdp->bufPtr = bufaddr;
1969 txbdp->lstatus = lstatus;
1970 }
1971
1972 lstatus = txbdp_start->lstatus;
1973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
Kumar Gala0bbaf062005-06-20 10:54:21 -05001975 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08001976 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001977 fcb = gfar_add_fcb(skb);
1978 lstatus |= BD_LFLAG(TXBD_TOE);
1979 gfar_tx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001980 }
1981
Dai Haruki77ecaf22008-12-16 15:30:48 -08001982 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001983 if (unlikely(NULL == fcb)) {
1984 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08001985 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06001986 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001987
1988 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001989 }
1990
Dai Haruki4669bc92008-12-17 16:51:04 -08001991 /* setup the TxBD length and buffer pointer for the first BD */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001992 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
Kumar Gala48268572009-03-18 23:28:22 -07001993 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Dai Haruki4669bc92008-12-17 16:51:04 -08001994 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
Dai Haruki4669bc92008-12-17 16:51:04 -08001996 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Dai Haruki4669bc92008-12-17 16:51:04 -08001998 /*
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00001999 * We can work in parallel with gfar_clean_tx_ring(), except
2000 * when modifying num_txbdfree. Note that we didn't grab the lock
2001 * when we were reading the num_txbdfree and checking for available
2002 * space, that's because outside of this function it can only grow,
2003 * and once we've got needed space, it cannot suddenly disappear.
2004 *
2005 * The lock also protects us from gfar_error(), which can modify
2006 * regs->tstat and thus retrigger the transfers, which is why we
2007 * also must grab the lock before setting ready bit for the first
2008 * to be transmitted BD.
2009 */
2010 spin_lock_irqsave(&tx_queue->txlock, flags);
2011
2012 /*
Dai Haruki4669bc92008-12-17 16:51:04 -08002013 * The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002014 * semantics (it requires synchronization between cacheable and
2015 * uncacheable mappings, which eieio doesn't provide and which we
2016 * don't need), thus requiring a more expensive sync instruction. At
2017 * some point, the set of architecture-independent barrier functions
2018 * should be expanded to include weaker barriers.
2019 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002020 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002021
Dai Haruki4669bc92008-12-17 16:51:04 -08002022 txbdp_start->lstatus = lstatus;
2023
2024 /* Update the current skb pointer to the next entry we will use
2025 * (wrapping if necessary) */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002026 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2027 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002028
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002029 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002030
2031 /* reduce TxBD free count */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002032 tx_queue->num_txbdfree -= (nr_frags + 1);
Dai Haruki4669bc92008-12-17 16:51:04 -08002033
2034 dev->trans_start = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036 /* If the next BD still needs to be cleaned up, then the bds
2037 are full. We need to tell the kernel to stop sending us stuff. */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002038 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002039 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002041 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 }
2043
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002045 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
2047 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002048 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002050 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051}
2052
2053/* Stops the kernel queue, and halts the controller */
2054static int gfar_close(struct net_device *dev)
2055{
2056 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002057
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002058 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002059
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002060 skb_queue_purge(&priv->rx_recycle);
Sebastian Siewiorab939902008-08-19 21:12:45 +02002061 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 stop_gfar(dev);
2063
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002064 /* Disconnect from the PHY */
2065 phy_disconnect(priv->phydev);
2066 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002068 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
2070 return 0;
2071}
2072
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002074static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002076 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
2078 return 0;
2079}
2080
2081
Kumar Gala0bbaf062005-06-20 10:54:21 -05002082/* Enables and disables VLAN insertion/extraction */
2083static void gfar_vlan_rx_register(struct net_device *dev,
2084 struct vlan_group *grp)
2085{
2086 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002087 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002088 unsigned long flags;
2089 u32 tempval;
2090
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002091 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002092 local_irq_save(flags);
2093 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002094
Anton Vorontsovcd1f55a2009-01-26 14:33:23 -08002095 priv->vlgrp = grp;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002096
2097 if (grp) {
2098 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002099 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002100 tempval |= TCTRL_VLINS;
2101
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002102 gfar_write(&regs->tctrl, tempval);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002103
Kumar Gala0bbaf062005-06-20 10:54:21 -05002104 /* Enable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002105 tempval = gfar_read(&regs->rctrl);
Dai Haruki77ecaf22008-12-16 15:30:48 -08002106 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002107 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002108 } else {
2109 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002110 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002111 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002112 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002113
2114 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002115 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002116 tempval &= ~RCTRL_VLEX;
Dai Haruki77ecaf22008-12-16 15:30:48 -08002117 /* If parse is no longer required, then disable parser */
2118 if (tempval & RCTRL_REQ_PARSER)
2119 tempval |= RCTRL_PRSDEP_INIT;
2120 else
2121 tempval &= ~RCTRL_PRSDEP_INIT;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002122 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002123 }
2124
Dai Haruki77ecaf22008-12-16 15:30:48 -08002125 gfar_change_mtu(dev, dev->mtu);
2126
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002127 unlock_rx_qs(priv);
2128 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002129}
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2132{
2133 int tempsize, tempval;
2134 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002135 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002137 int frame_size = new_mtu + ETH_HLEN;
2138
Dai Haruki77ecaf22008-12-16 15:30:48 -08002139 if (priv->vlgrp)
Dai Harukifaa89572008-03-24 10:53:26 -05002140 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002141
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002143 if (netif_msg_drv(priv))
2144 printk(KERN_ERR "%s: Invalid MTU setting\n",
2145 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 return -EINVAL;
2147 }
2148
Dai Haruki77ecaf22008-12-16 15:30:48 -08002149 if (gfar_uses_fcb(priv))
2150 frame_size += GMAC_FCB_LEN;
2151
2152 frame_size += priv->padding;
2153
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 tempsize =
2155 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2156 INCREMENTAL_BUFFER_SIZE;
2157
2158 /* Only stop and start the controller if it isn't already
Andy Fleming7f7f5312005-11-11 12:38:59 -06002159 * stopped, and we changed something */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2161 stop_gfar(dev);
2162
2163 priv->rx_buffer_size = tempsize;
2164
2165 dev->mtu = new_mtu;
2166
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002167 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2168 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169
2170 /* If the mtu is larger than the max size for standard
2171 * ethernet frames (ie, a jumbo frame), then set maccfg2
2172 * to allow huge frames, and to check the length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002173 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
2175 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
2176 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2177 else
2178 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2179
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002180 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
2182 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2183 startup_gfar(dev);
2184
2185 return 0;
2186}
2187
Sebastian Siewiorab939902008-08-19 21:12:45 +02002188/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 * transmitted after a set amount of time.
2190 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002191 * starting over will fix the problem.
2192 */
2193static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002195 struct gfar_private *priv = container_of(work, struct gfar_private,
2196 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002197 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
2199 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002200 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 stop_gfar(dev);
2202 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002203 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 }
2205
David S. Miller263ba322008-07-15 03:47:41 -07002206 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
2208
Sebastian Siewiorab939902008-08-19 21:12:45 +02002209static void gfar_timeout(struct net_device *dev)
2210{
2211 struct gfar_private *priv = netdev_priv(dev);
2212
2213 dev->stats.tx_errors++;
2214 schedule_work(&priv->reset_task);
2215}
2216
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217/* Interrupt Handler for Transmit complete */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002218static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002220 struct net_device *dev = tx_queue->dev;
Dai Harukid080cd62008-04-09 19:37:51 -05002221 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002222 struct gfar_priv_rx_q *rx_queue = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002223 struct txbd8 *bdp;
2224 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002225 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002226 struct sk_buff *skb;
2227 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002228 int tx_ring_size = tx_queue->tx_ring_size;
Dai Haruki4669bc92008-12-17 16:51:04 -08002229 int frags = 0;
2230 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002231 int howmany = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002232 u32 lstatus;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002234 rx_queue = priv->rx_queue[tx_queue->qindex];
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002235 bdp = tx_queue->dirty_tx;
2236 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002237
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002238 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002239 unsigned long flags;
2240
Dai Haruki4669bc92008-12-17 16:51:04 -08002241 frags = skb_shinfo(skb)->nr_frags;
2242 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
2243
2244 lstatus = lbdp->lstatus;
2245
2246 /* Only clean completed frames */
2247 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2248 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 break;
2250
Kumar Gala48268572009-03-18 23:28:22 -07002251 dma_unmap_single(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002252 bdp->bufPtr,
2253 bdp->length,
2254 DMA_TO_DEVICE);
2255
2256 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2257 bdp = next_txbd(bdp, base, tx_ring_size);
2258
2259 for (i = 0; i < frags; i++) {
Kumar Gala48268572009-03-18 23:28:22 -07002260 dma_unmap_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002261 bdp->bufPtr,
2262 bdp->length,
2263 DMA_TO_DEVICE);
2264 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2265 bdp = next_txbd(bdp, base, tx_ring_size);
2266 }
2267
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002268 /*
2269 * If there's room in the queue (limit it to rx_buffer_size)
2270 * we add this skb back into the pool, if it's the right size
2271 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002272 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002273 skb_recycle_check(skb, priv->rx_buffer_size +
2274 RXBUF_ALIGNMENT))
2275 __skb_queue_head(&priv->rx_recycle, skb);
2276 else
2277 dev_kfree_skb_any(skb);
2278
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002279 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002280
2281 skb_dirtytx = (skb_dirtytx + 1) &
2282 TX_RING_MOD_MASK(tx_ring_size);
2283
Dai Harukid080cd62008-04-09 19:37:51 -05002284 howmany++;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002285 spin_lock_irqsave(&tx_queue->txlock, flags);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002286 tx_queue->num_txbdfree += frags + 1;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002287 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Dai Haruki4669bc92008-12-17 16:51:04 -08002288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
Dai Haruki4669bc92008-12-17 16:51:04 -08002290 /* If we freed a buffer, we can restart transmission, if necessary */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002291 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2292 netif_wake_subqueue(dev, tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293
Dai Haruki4669bc92008-12-17 16:51:04 -08002294 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002295 tx_queue->skb_dirtytx = skb_dirtytx;
2296 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
Dai Harukid080cd62008-04-09 19:37:51 -05002298 dev->stats.tx_packets += howmany;
2299
2300 return howmany;
2301}
2302
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002303static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
Dai Haruki8c7396a2008-12-17 16:52:00 -08002304{
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002305 unsigned long flags;
2306
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002307 spin_lock_irqsave(&gfargrp->grplock, flags);
2308 if (napi_schedule_prep(&gfargrp->napi)) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002309 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002310 __napi_schedule(&gfargrp->napi);
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002311 } else {
2312 /*
2313 * Clear IEVENT, so interrupts aren't called again
2314 * because of the packets that have already arrived.
2315 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002316 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002317 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002318 spin_unlock_irqrestore(&gfargrp->grplock, flags);
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002319
Dai Haruki8c7396a2008-12-17 16:52:00 -08002320}
2321
Dai Harukid080cd62008-04-09 19:37:51 -05002322/* Interrupt Handler for Transmit complete */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002323static irqreturn_t gfar_transmit(int irq, void *grp_id)
Dai Harukid080cd62008-04-09 19:37:51 -05002324{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002325 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 return IRQ_HANDLED;
2327}
2328
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002329static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Andy Fleming815b97c2008-04-22 17:18:29 -05002330 struct sk_buff *skb)
2331{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002332 struct net_device *dev = rx_queue->dev;
Andy Fleming815b97c2008-04-22 17:18:29 -05002333 struct gfar_private *priv = netdev_priv(dev);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002334 dma_addr_t buf;
Andy Fleming815b97c2008-04-22 17:18:29 -05002335
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002336 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2337 priv->rx_buffer_size, DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002338 gfar_init_rxbdp(rx_queue, bdp, buf);
Andy Fleming815b97c2008-04-22 17:18:29 -05002339}
2340
2341
2342struct sk_buff * gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002344 unsigned int alignamount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 struct gfar_private *priv = netdev_priv(dev);
2346 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002348 skb = __skb_dequeue(&priv->rx_recycle);
2349 if (!skb)
2350 skb = netdev_alloc_skb(dev,
2351 priv->rx_buffer_size + RXBUF_ALIGNMENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352
Andy Fleming815b97c2008-04-22 17:18:29 -05002353 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 return NULL;
2355
Andy Fleming7f7f5312005-11-11 12:38:59 -06002356 alignamount = RXBUF_ALIGNMENT -
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002357 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
Andy Fleming7f7f5312005-11-11 12:38:59 -06002358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 /* We need the data buffer to be aligned properly. We will reserve
2360 * as many bytes as needed to align the data properly
2361 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002362 skb_reserve(skb, alignamount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 return skb;
2365}
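/*
 * A short worked example of the alignment math above, assuming a
 * power-of-two RXBUF_ALIGNMENT of 64: if skb->data ends in 0x...10,
 * alignamount = 64 - 16 = 48 and the reserve moves the payload up to
 * the next 64-byte boundary; if skb->data is already aligned (low six
 * bits zero), alignamount works out to the full 64, so an entire
 * alignment unit is reserved.
 */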
2366
Li Yang298e1a92007-10-16 14:18:13 +08002367static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368{
Li Yang298e1a92007-10-16 14:18:13 +08002369 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002370 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 struct gfar_extra_stats *estats = &priv->extra_stats;
2372
2373 /* If the packet was truncated, none of the other errors
2374 * matter */
2375 if (status & RXBD_TRUNCATED) {
2376 stats->rx_length_errors++;
2377
2378 estats->rx_trunc++;
2379
2380 return;
2381 }
2382 /* Count the errors, if there were any */
2383 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2384 stats->rx_length_errors++;
2385
2386 if (status & RXBD_LARGE)
2387 estats->rx_large++;
2388 else
2389 estats->rx_short++;
2390 }
2391 if (status & RXBD_NONOCTET) {
2392 stats->rx_frame_errors++;
2393 estats->rx_nonoctet++;
2394 }
2395 if (status & RXBD_CRCERR) {
2396 estats->rx_crcerr++;
2397 stats->rx_crc_errors++;
2398 }
2399 if (status & RXBD_OVERRUN) {
2400 estats->rx_overrun++;
2401 stats->rx_crc_errors++;
2402 }
2403}
2404
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002405irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002407 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 return IRQ_HANDLED;
2409}
2410
Kumar Gala0bbaf062005-06-20 10:54:21 -05002411static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2412{
2413 /* If valid headers were found, and valid sums
2414 * were verified, then we tell the kernel that no
2415 * checksumming is necessary. Otherwise, the stack still has to verify it */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002416 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05002417 skb->ip_summed = CHECKSUM_UNNECESSARY;
2418 else
2419 skb->ip_summed = CHECKSUM_NONE;
2420}
2421
2422
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423/* gfar_process_frame() -- handle one incoming packet if skb
2424 * isn't NULL. */
2425static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
Dai Haruki2c2db482008-12-16 15:31:15 -08002426 int amount_pull)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427{
2428 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002429 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430
Dai Haruki2c2db482008-12-16 15:31:15 -08002431 int ret;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002432
Dai Haruki2c2db482008-12-16 15:31:15 -08002433 /* fcb is at the beginning if exists */
2434 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
Dai Haruki2c2db482008-12-16 15:31:15 -08002436 /* Remove the FCB from the skb */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002437 skb_set_queue_mapping(skb, fcb->rq);
Dai Haruki2c2db482008-12-16 15:31:15 -08002438 /* Remove the padded bytes, if there are any */
2439 if (amount_pull)
2440 skb_pull(skb, amount_pull);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002441
Dai Haruki2c2db482008-12-16 15:31:15 -08002442 if (priv->rx_csum_enable)
2443 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002444
Dai Haruki2c2db482008-12-16 15:31:15 -08002445 /* Tell the skb what kind of packet this is */
2446 skb->protocol = eth_type_trans(skb, dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002447
Dai Haruki2c2db482008-12-16 15:31:15 -08002448 /* Send the packet up the stack */
2449 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2450 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2451 else
2452 ret = netif_receive_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
Dai Haruki2c2db482008-12-16 15:31:15 -08002454 if (NET_RX_DROP == ret)
2455 priv->extra_stats.kernel_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
2457 return 0;
2458}
2459
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled.
 */
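/* Rough per-descriptor flow of the loop below (a summary of the
 * existing logic, not an addition to it):
 *   1. allocate a replacement skb for the slot (gfar_new_skb)
 *   2. unmap the DMA buffer that just completed
 *   3. on error, count it and recycle the old skb; otherwise trim the
 *      FCS, run gfar_process_frame() and hand the packet to the stack
 *   4. install the replacement skb in the descriptor (gfar_new_rxbdp)
 *      and advance cur_rx/skb_currx around the ring
 */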
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* Update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

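	/* How the NAPI budget is spread over this group's RX queues
	 * (a description of the loop below, not new behaviour): the
	 * budget is first split evenly across the not-yet-serviced
	 * queues; any queue that uses less than its share is marked
	 * serviced, and its leftover budget is pooled and redistributed
	 * over the remaining queues on the next pass, until either the
	 * budget is exhausted or every queue has been serviced. */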
	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer;
		 * otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
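/*
 * Note on the approach below: each interrupt group's IRQ lines are
 * disabled and the normal handler is invoked by hand, so the regular
 * TX/RX/error paths run synchronously in netpoll context. Whether the
 * controller exposes one combined interrupt or separate TX/RX/error
 * interrupts per group is indicated by FSL_GIANFAR_DEV_HAS_MULTI_INTR.
 */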
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of a new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

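		/* Register usage below (summarized from the code; the bit
		 * semantics are assumed from the eTSEC documentation rather
		 * than restated here): MACCFG2 carries the duplex bit and
		 * the interface-mode field (GMII vs. MII selection for
		 * gigabit vs. 10/100), while ECNTRL's R100 bit distinguishes
		 * 100 Mbit from 10 Mbit when a reduced-pin interface is in
		 * use. */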
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
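/* The controller provides two 8 x 32-bit hash tables, the igaddr and
 * gaddr registers, plus a set of exact-match MAC address registers on
 * some parts. (The individual/group split implied by the register
 * names is an assumption; this function always programs both tables
 * together.) Writing all ones opens the filter for the ALLMULTI case;
 * writing zeroes closes it again before the new list is hashed in. */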
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (i.e. the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
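/* Worked example of the index math below (the CRC value is made up
 * purely for illustration): with hash_width == 8 and a hash result
 * whose top byte is 0xB4 (1011 0100), the top 3 bits (101 = 5) select
 * hash register 5 and the next 5 bits (10100 = 20) select bit 20
 * counting from the MSB, i.e. the mask written is 1 << (31 - 20).
 * If the extended hash is enabled the width is larger, but the split
 * into "register select" and "bit select" works the same way. */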
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the num'th pair to a given address.
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, because
	 * the registers expect the address in reverse byte order */
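	/* Illustration with a hypothetical address: for 00:04:9f:01:02:03
	 * the reversal below produces tmpbuf = 03 02 01 9f 04 00, so the
	 * first register of the pair is written with the u32 built from
	 * bytes 03 02 01 9f and the second with a u32 whose leading bytes
	 * are 04 00 (byte-to-word interpretation follows host endianness). */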
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
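/* Meaning of the IEVENT bits handled below (names follow the printk
 * messages in this handler; the exact hardware semantics are assumed
 * from the eTSEC documentation, not restated from this file):
 *   TXE/LC/CRL/XFUN - TX error summary, late collision, collision
 *                     retry limit, and TX FIFO underrun
 *   BSY             - receiver busy (frames discarded for lack of
 *                     buffers), so reception is rescheduled
 *   BABR/BABT       - babbling (oversized) receive/transmit frames
 *   EBERR           - system bus error
 *   RXC             - a control frame was received
 *   MAG             - magic packet detected (not an error)
 */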
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the raw event for debugging */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
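
/* Binding note (summarizing the table above): a device-tree node with
 * device_type "network" and compatible "gianfar", or with compatible
 * "fsl,etsec2", binds to this driver; MODULE_DEVICE_TABLE exports the
 * table so the module can be autoloaded from the compatible string. */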

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_legacy_suspend,
	.resume = gfar_legacy_resume,
	.driver.pm = GFAR_PM_OPS,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);