/*
 * drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
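/*
 * Illustrative sketch (added comment, not part of the original driver code):
 * the RX processing described above amounts to a walk of the descriptor
 * ring, roughly like this, where process_frame() and refill_skb() stand in
 * for the real helpers:
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		process_frame(bdp);		// pass the skb up the stack
 *		refill_skb(bdp);		// attach a freshly allocated skb
 *		bdp->status |= RXBD_EMPTY;	// hand the BD back to hardware
 *		if (bdp->status & RXBD_WRAP)	// wrap bit marks the last BD
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */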

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
		const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
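/*
 * Added note (not in the original source): the eieio() barrier above appears
 * to be there so the bufPtr store is visible before the lstatus store that
 * marks the descriptor EMPTY; otherwise the controller could pick up a
 * descriptor that still points at the previous buffer.
 */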

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
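/*
 * Added note (an assumption, not stated in the original source): the
 * "baddr += 2" stride above suggests each TBASEn/RBASEn register occupies an
 * 8-byte slot in the register map, so advancing a u32 __iomem pointer by two
 * moves to the base register of the next queue.
 */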

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
		(priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
		(priv->ndev->features & NETIF_F_RXCSUM) ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
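/*
 * Usage sketch (added comment, illustrative only): user space would enable
 * hardware RX timestamping on every packet roughly as follows; the socket
 * fd and the "eth0" interface name are assumptions for the example:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_OFF,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The handler above rejects HWTSTAMP_TX_ON and any rx_filter other than
 * NONE when the device lacks FSL_GIANFAR_DEV_HAS_TIMER, and restarts the
 * interface whenever the RX timestamping state actually changes.
 */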

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
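/*
 * Worked example (added comment): reverse_bitmap(0x01, 8) returns 0x80 and
 * reverse_bitmap(0x03, 8) returns 0xC0. The device-tree queue bit-maps put
 * q0 in the MSB, while for_each_set_bit() scans from bit 0, so the maps are
 * mirrored before use (see gfar_probe()).
 */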

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
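/*
 * Added summary (not in the original source): after this runs, the filer
 * table holds a default match rule at MAX_FILER_IDX, one four-entry cluster
 * per traffic class (IPv6/IPv4, plain and with TCP/UDP) below it, and
 * no-match rules in every remaining lower slot; cur_filer_idx records the
 * first non-masked rule.
 */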
928
Anton Vorontsov7d350972010-06-30 06:39:12 +0000929static void gfar_detect_errata(struct gfar_private *priv)
930{
931 struct device *dev = &priv->ofdev->dev;
932 unsigned int pvr = mfspr(SPRN_PVR);
933 unsigned int svr = mfspr(SPRN_SVR);
934 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
935 unsigned int rev = svr & 0xffff;
936
937 /* MPC8313 Rev 2.0 and higher; All MPC837x */
938 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
939 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
940 priv->errata |= GFAR_ERRATA_74;
941
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +0000942 /* MPC8313 and MPC837x all rev */
943 if ((pvr == 0x80850010 && mod == 0x80b0) ||
944 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
945 priv->errata |= GFAR_ERRATA_76;
946
Anton Vorontsov511d9342010-06-30 06:39:15 +0000947 /* MPC8313 and MPC837x all rev */
948 if ((pvr == 0x80850010 && mod == 0x80b0) ||
949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
950 priv->errata |= GFAR_ERRATA_A002;
951
Alex Dubov4363c2f2011-03-16 17:57:13 +0000952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
955 priv->errata |= GFAR_ERRATA_12;
956
Anton Vorontsov7d350972010-06-30 06:39:12 +0000957 if (priv->errata)
958 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
959 priv->errata);
960}
961
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400962/* Set up the ethernet device structure, private data,
963 * and anything else we need before we start */
Grant Likely74888762011-02-22 21:05:51 -0700964static int gfar_probe(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965{
966 u32 tempval;
967 struct net_device *dev = NULL;
968 struct gfar_private *priv = NULL;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000969 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000970 int err = 0, i, grp_idx = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000971 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000972 u32 isrg = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +0000973 u32 __iomem *baddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000975 err = gfar_of_init(ofdev, &dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000977 if (err)
978 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979
980 priv = netdev_priv(dev);
Kumar Gala48268572009-03-18 23:28:22 -0700981 priv->ndev = dev;
982 priv->ofdev = ofdev;
Grant Likely61c7a082010-04-13 16:12:29 -0700983 priv->node = ofdev->dev.of_node;
Kumar Gala48268572009-03-18 23:28:22 -0700984 SET_NETDEV_DEV(dev, &ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
Scott Woodd87eb122008-07-11 18:04:45 -0500986 spin_lock_init(&priv->bflock);
Sebastian Siewiorab939902008-08-19 21:12:45 +0200987 INIT_WORK(&priv->reset_task, gfar_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
Andy Flemingb31a1d82008-12-16 15:29:15 -0800989 dev_set_drvdata(&ofdev->dev, priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000990 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
Anton Vorontsov7d350972010-06-30 06:39:12 +0000992 gfar_detect_errata(priv);
993
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994 /* Stop the DMA engine now, in case it was running before */
995 /* (The firmware could have used it, and left it running). */
Andy Fleming257d9382008-12-16 15:25:45 -0800996 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997
998 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000999 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000
Andy Flemingb98ac702009-02-04 16:38:05 -08001001 /* We need to delay at least 3 TX clocks */
1002 udelay(2);
1003
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001005 gfar_write(&regs->maccfg1, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006
1007 /* Initialize MACCFG2. */
Anton Vorontsov7d350972010-06-30 06:39:12 +00001008 tempval = MACCFG2_INIT_SETTINGS;
1009 if (gfar_has_errata(priv, GFAR_ERRATA_74))
1010 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1011 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
1013 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001014 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016 /* Set the dev->base_addr to the gfar reg region */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001017 dev->base_addr = (unsigned long) regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018
Andy Flemingb31a1d82008-12-16 15:29:15 -08001019 SET_NETDEV_DEV(dev, &ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020
1021 /* Fill in the dev structure */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022 dev->watchdog_timeo = TX_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 dev->mtu = 1500;
Andy Fleming26ccfc32009-03-10 12:58:28 +00001024 dev->netdev_ops = &gfar_netdev_ops;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001025 dev->ethtool_ops = &gfar_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001027 /* Register for napi ...We are registering NAPI for each grp */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001028 for (i = 0; i < priv->num_grps; i++)
1029 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001030
Andy Flemingb31a1d82008-12-16 15:29:15 -08001031 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
Michał Mirosław8b3afe92011-04-15 04:50:50 +00001032 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1033 NETIF_F_RXCSUM;
1034 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1035 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1036 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037
Jiri Pirko87c288c2011-07-20 04:54:19 +00001038 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1039 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001040 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Jiri Pirko87c288c2011-07-20 04:54:19 +00001041 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05001042
Andy Flemingb31a1d82008-12-16 15:29:15 -08001043 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001044 priv->extended_hash = 1;
1045 priv->hash_width = 9;
1046
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001047 priv->hash_regs[0] = &regs->igaddr0;
1048 priv->hash_regs[1] = &regs->igaddr1;
1049 priv->hash_regs[2] = &regs->igaddr2;
1050 priv->hash_regs[3] = &regs->igaddr3;
1051 priv->hash_regs[4] = &regs->igaddr4;
1052 priv->hash_regs[5] = &regs->igaddr5;
1053 priv->hash_regs[6] = &regs->igaddr6;
1054 priv->hash_regs[7] = &regs->igaddr7;
1055 priv->hash_regs[8] = &regs->gaddr0;
1056 priv->hash_regs[9] = &regs->gaddr1;
1057 priv->hash_regs[10] = &regs->gaddr2;
1058 priv->hash_regs[11] = &regs->gaddr3;
1059 priv->hash_regs[12] = &regs->gaddr4;
1060 priv->hash_regs[13] = &regs->gaddr5;
1061 priv->hash_regs[14] = &regs->gaddr6;
1062 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001063
1064 } else {
1065 priv->extended_hash = 0;
1066 priv->hash_width = 8;
1067
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001068 priv->hash_regs[0] = &regs->gaddr0;
1069 priv->hash_regs[1] = &regs->gaddr1;
1070 priv->hash_regs[2] = &regs->gaddr2;
1071 priv->hash_regs[3] = &regs->gaddr3;
1072 priv->hash_regs[4] = &regs->gaddr4;
1073 priv->hash_regs[5] = &regs->gaddr5;
1074 priv->hash_regs[6] = &regs->gaddr6;
1075 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001076 }
1077
Andy Flemingb31a1d82008-12-16 15:29:15 -08001078 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001079 priv->padding = DEFAULT_PADDING;
1080 else
1081 priv->padding = 0;
1082
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001083 if (dev->features & NETIF_F_IP_CSUM ||
1084 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001085 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001087 /* Program the isrg regs only if number of grps > 1 */
1088 if (priv->num_grps > 1) {
1089 baddr = &regs->isrg0;
1090 for (i = 0; i < priv->num_grps; i++) {
1091 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
1092 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
1093 gfar_write(baddr, isrg);
1094 baddr++;
1095 isrg = 0x0;
1096 }
1097 }
1098
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001099 /* Need to reverse the bit maps as bit_map's MSB is q0
Akinobu Mita984b3f52010-03-05 13:41:37 -08001100 * but, for_each_set_bit parses from right to left, which
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001101 * basically reverses the queue numbers */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001102 for (i = 0; i< priv->num_grps; i++) {
1103 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
1104 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1105 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
1106 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1107 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001108
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001109 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1110 * also assign queues to groups */
1111 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1112 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001113 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001114 priv->num_rx_queues) {
1115 priv->gfargrp[grp_idx].num_rx_queues++;
1116 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1117 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1118 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1119 }
1120 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001121 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001122 priv->num_tx_queues) {
1123 priv->gfargrp[grp_idx].num_tx_queues++;
1124 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1125 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1126 tqueue = tqueue | (TQUEUE_EN0 >> i);
1127 }
1128 priv->gfargrp[grp_idx].rstat = rstat;
1129 priv->gfargrp[grp_idx].tstat = tstat;
1130 rstat = tstat =0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001131 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001132
1133 gfar_write(&regs->rqueue, rqueue);
1134 gfar_write(&regs->tqueue, tqueue);
1135
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001138 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001139 for (i = 0; i < priv->num_tx_queues; i++) {
1140 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1141 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1142 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1143 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1144 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001145
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001146 for (i = 0; i < priv->num_rx_queues; i++) {
1147 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1148 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1149 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001152 /* always enable rx filer*/
1153 priv->rx_filer_enable = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001154 /* Enable most messages by default */
1155 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1156
Trent Piephod3eab822008-10-02 11:12:24 +00001157 /* Carrier starts down, phylib will bring it up */
1158 netif_carrier_off(dev);
1159
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 err = register_netdev(dev);
1161
1162 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001163 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 goto register_fail;
1165 }
1166
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001167 device_init_wakeup(&dev->dev,
1168 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1169
Dai Harukic50a5d92008-12-17 16:51:32 -08001170 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001171 for (i = 0; i < priv->num_grps; i++) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001172 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Joe Perches0015e552012-03-25 07:10:07 +00001173 sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
1174 dev->name, "_g", '0' + i, "_tx");
1175 sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
1176 dev->name, "_g", '0' + i, "_rx");
1177 sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
1178 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001179 } else
Joe Perches0015e552012-03-25 07:10:07 +00001180 strcpy(priv->gfargrp[i].int_name_tx, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001181 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001182
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001183 /* Initialize the filer table */
1184 gfar_init_filer_table(priv);
1185
Andy Fleming7f7f5312005-11-11 12:38:59 -06001186 /* Create all the sysfs files */
1187 gfar_init_sysfs(dev);
1188
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001190 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
1192 /* Even more device info helps when determining which kernel */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001193 /* provided which set of benchmarks. */
Joe Perches59deab22011-06-14 08:57:47 +00001194 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001195 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001196 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1197 i, priv->rx_queue[i]->rx_ring_size);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001198 for(i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001199 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1200 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201
1202 return 0;
1203
1204register_fail:
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001205 unmap_group_regs(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001206 free_tx_pointers(priv);
1207 free_rx_pointers(priv);
Grant Likelyfe192a42009-04-25 12:53:12 +00001208 if (priv->phy_node)
1209 of_node_put(priv->phy_node);
1210 if (priv->tbi_node)
1211 of_node_put(priv->tbi_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 free_netdev(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001213 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214}
1215
Grant Likely2dc11582010-08-06 09:25:50 -06001216static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217{
Andy Flemingb31a1d82008-12-16 15:29:15 -08001218 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
Grant Likelyfe192a42009-04-25 12:53:12 +00001220 if (priv->phy_node)
1221 of_node_put(priv->phy_node);
1222 if (priv->tbi_node)
1223 of_node_put(priv->tbi_node);
1224
Andy Flemingb31a1d82008-12-16 15:29:15 -08001225 dev_set_drvdata(&ofdev->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
David S. Millerd9d8e042009-09-06 01:41:02 -07001227 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001228 unmap_group_regs(priv);
Kumar Gala48268572009-03-18 23:28:22 -07001229 free_netdev(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
1231 return 0;
1232}
1233
Scott Woodd87eb122008-07-11 18:04:45 -05001234#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001235
1236static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001237{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001238 struct gfar_private *priv = dev_get_drvdata(dev);
1239 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001240 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001241 unsigned long flags;
1242 u32 tempval;
1243
1244 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001245 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001246
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001247 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001248
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001249 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001250
1251 local_irq_save(flags);
1252 lock_tx_qs(priv);
1253 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001254
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001255 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001256
1257 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001258 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001259
1260 tempval &= ~MACCFG1_TX_EN;
1261
1262 if (!magic_packet)
1263 tempval &= ~MACCFG1_RX_EN;
1264
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001265 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001266
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001267 unlock_rx_qs(priv);
1268 unlock_tx_qs(priv);
1269 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001270
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001271 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001272
1273 if (magic_packet) {
1274 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001275 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001276
1277 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001278 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001279 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001280 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001281 } else {
1282 phy_stop(priv->phydev);
1283 }
1284 }
1285
1286 return 0;
1287}
1288
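/* Resume hook: counterpart of gfar_suspend(). Restart the PHY if wake-on-LAN
 * was not in use, clear Magic Packet mode and restart the controller before
 * re-attaching the interface.
 */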
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001289static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001290{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001291 struct gfar_private *priv = dev_get_drvdata(dev);
1292 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001293 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001294 unsigned long flags;
1295 u32 tempval;
1296 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001297 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001298
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001299 if (!netif_running(ndev)) {
1300 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001301 return 0;
1302 }
1303
1304 if (!magic_packet && priv->phydev)
1305 phy_start(priv->phydev);
1306
1307 /* Disable Magic Packet mode, in case something
1308 * else woke us up.
1309 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001310 local_irq_save(flags);
1311 lock_tx_qs(priv);
1312 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001313
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001314 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001315 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001316 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001317
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001318 gfar_start(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001319
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001320 unlock_rx_qs(priv);
1321 unlock_tx_qs(priv);
1322 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001323
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001324 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001325
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001326 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001327
1328 return 0;
1329}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001330
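/* Hibernation restore: register contents are not preserved, so rebuild the
 * buffer descriptor rings and reprogram the MAC before restarting the
 * controller and the PHY.
 */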
1331static int gfar_restore(struct device *dev)
1332{
1333 struct gfar_private *priv = dev_get_drvdata(dev);
1334 struct net_device *ndev = priv->ndev;
1335
1336 if (!netif_running(ndev))
1337 return 0;
1338
1339 gfar_init_bds(ndev);
1340 init_registers(ndev);
1341 gfar_set_mac_address(ndev);
1342 gfar_init_mac(ndev);
1343 gfar_start(ndev);
1344
1345 priv->oldlink = 0;
1346 priv->oldspeed = 0;
1347 priv->oldduplex = -1;
1348
1349 if (priv->phydev)
1350 phy_start(priv->phydev);
1351
1352 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001353 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001354
1355 return 0;
1356}
1357
1358static struct dev_pm_ops gfar_pm_ops = {
1359 .suspend = gfar_suspend,
1360 .resume = gfar_resume,
1361 .freeze = gfar_suspend,
1362 .thaw = gfar_resume,
1363 .restore = gfar_restore,
1364};
1365
1366#define GFAR_PM_OPS (&gfar_pm_ops)
1367
Scott Woodd87eb122008-07-11 18:04:45 -05001368#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001369
1370#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001371
Scott Woodd87eb122008-07-11 18:04:45 -05001372#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001374/* Reads the controller's registers to determine what interface
1375 * connects it to the PHY.
1376 */
1377static phy_interface_t gfar_get_interface(struct net_device *dev)
1378{
1379 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001380 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001381 u32 ecntrl;
1382
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001383 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001384
1385 if (ecntrl & ECNTRL_SGMII_MODE)
1386 return PHY_INTERFACE_MODE_SGMII;
1387
1388 if (ecntrl & ECNTRL_TBI_MODE) {
1389 if (ecntrl & ECNTRL_REDUCED_MODE)
1390 return PHY_INTERFACE_MODE_RTBI;
1391 else
1392 return PHY_INTERFACE_MODE_TBI;
1393 }
1394
1395 if (ecntrl & ECNTRL_REDUCED_MODE) {
1396 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1397 return PHY_INTERFACE_MODE_RMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001398 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001399 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001400
1401 /*
1402 * This isn't autodetected right now, so it must
1403 * be set by the device tree or platform code.
1404 */
1405 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1406 return PHY_INTERFACE_MODE_RGMII_ID;
1407
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001408 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001409 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001410 }
1411
Andy Flemingb31a1d82008-12-16 15:29:15 -08001412 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001413 return PHY_INTERFACE_MODE_GMII;
1414
1415 return PHY_INTERFACE_MODE_MII;
1416}
1417
1418
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001419/* Initializes driver's PHY state, and attaches to the PHY.
1420 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 */
1422static int init_phy(struct net_device *dev)
1423{
1424 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001425 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001426 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001427 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001428 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 priv->oldlink = 0;
1431 priv->oldspeed = 0;
1432 priv->oldduplex = -1;
1433
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001434 interface = gfar_get_interface(dev);
1435
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001436 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1437 interface);
1438 if (!priv->phydev)
1439 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1440 interface);
1441 if (!priv->phydev) {
1442 dev_err(&dev->dev, "could not attach to PHY\n");
1443 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001444 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
Kapil Junejad3c12872007-05-11 18:25:11 -05001446 if (interface == PHY_INTERFACE_MODE_SGMII)
1447 gfar_configure_serdes(dev);
1448
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001449 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001450 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1451 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454}
1455
Paul Gortmakerd0313582008-04-17 00:08:10 -04001456/*
1457 * Initialize TBI PHY interface for communicating with the
1458 * SERDES lynx PHY on the chip. We communicate with this PHY
1459 * through the MDIO bus on each controller, treating it as a
1460 * "normal" PHY at the address found in the TBIPA register. We assume
1461 * that the TBIPA register is valid. Either the MDIO bus code will set
1462 * it to a value that doesn't conflict with other PHYs on the bus, or the
1463 * value doesn't matter, as there are no other PHYs on the bus.
1464 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001465static void gfar_configure_serdes(struct net_device *dev)
1466{
1467 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001468 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001469
Grant Likelyfe192a42009-04-25 12:53:12 +00001470 if (!priv->tbi_node) {
1471 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1472 "device tree specify a tbi-handle\n");
1473 return;
1474 }
1475
1476 tbiphy = of_phy_find_device(priv->tbi_node);
1477 if (!tbiphy) {
1478 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001479 return;
1480 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001481
Andy Flemingb31a1d82008-12-16 15:29:15 -08001482 /*
1483 * If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001484 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1485 * everything for us? Resetting it takes the link down and requires
1486 * several seconds for it to come back.
1487 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001488 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001489 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001490
Paul Gortmakerd0313582008-04-17 00:08:10 -04001491	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001492 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001493
Grant Likelyfe192a42009-04-25 12:53:12 +00001494 phy_write(tbiphy, MII_ADVERTISE,
Kapil Junejad3c12872007-05-11 18:25:11 -05001495 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1496 ADVERTISE_1000XPSE_ASYM);
1497
Grant Likelyfe192a42009-04-25 12:53:12 +00001498 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
Kapil Junejad3c12872007-05-11 18:25:11 -05001499 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1500}
1501
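/* Clear and mask the interrupts of every group, zero the hash and group
 * address filter registers, clear the RMON MIB counters when present, and
 * program the maximum receive buffer and minimum frame length registers.
 */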
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502static void init_registers(struct net_device *dev)
1503{
1504 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001505 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001506 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001508 for (i = 0; i < priv->num_grps; i++) {
1509 regs = priv->gfargrp[i].regs;
1510 /* Clear IEVENT */
1511 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001513 /* Initialize IMASK */
1514 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1515 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001517 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001519 gfar_write(&regs->igaddr0, 0);
1520 gfar_write(&regs->igaddr1, 0);
1521 gfar_write(&regs->igaddr2, 0);
1522 gfar_write(&regs->igaddr3, 0);
1523 gfar_write(&regs->igaddr4, 0);
1524 gfar_write(&regs->igaddr5, 0);
1525 gfar_write(&regs->igaddr6, 0);
1526 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001528 gfar_write(&regs->gaddr0, 0);
1529 gfar_write(&regs->gaddr1, 0);
1530 gfar_write(&regs->gaddr2, 0);
1531 gfar_write(&regs->gaddr3, 0);
1532 gfar_write(&regs->gaddr4, 0);
1533 gfar_write(&regs->gaddr5, 0);
1534 gfar_write(&regs->gaddr6, 0);
1535 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001538 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001539 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001542 gfar_write(&regs->rmon.cam1, 0xffffffff);
1543 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 }
1545
1546 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001547 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001550 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551}
1552
Anton Vorontsov511d9342010-06-30 06:39:15 +00001553static int __gfar_is_rx_idle(struct gfar_private *priv)
1554{
1555 u32 res;
1556
1557 /*
 1558	 * Normally TSEC should not hang on GRS commands, so we should
1559 * actually wait for IEVENT_GRSC flag.
1560 */
1561 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1562 return 0;
1563
1564 /*
1565 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1566 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1567 * and the Rx can be safely reset.
1568 */
1569 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1570 res &= 0x7f807f80;
1571 if ((res & 0xffff) == (res >> 16))
1572 return 1;
1573
1574 return 0;
1575}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001576
1577/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001578static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579{
1580 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001581 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001583 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001585 for (i = 0; i < priv->num_grps; i++) {
1586 regs = priv->gfargrp[i].regs;
1587 /* Mask all interrupts */
1588 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001590 /* Clear all interrupts */
1591 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001594 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001596 tempval = gfar_read(&regs->dmactrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1598 != (DMACTRL_GRS | DMACTRL_GTS)) {
Anton Vorontsov511d9342010-06-30 06:39:15 +00001599 int ret;
1600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001602 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603
Anton Vorontsov511d9342010-06-30 06:39:15 +00001604 do {
1605 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1606 (IEVENT_GRSC | IEVENT_GTSC)) ==
1607 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1608 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1609 ret = __gfar_is_rx_idle(priv);
1610 } while (!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 }
Scott Woodd87eb122008-07-11 18:04:45 -05001612}
Scott Woodd87eb122008-07-11 18:04:45 -05001613
1614/* Halt the receive and transmit queues */
1615void gfar_halt(struct net_device *dev)
1616{
1617 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001618 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001619 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
Scott Wood2a54adc2008-08-12 15:10:46 -05001621 gfar_halt_nodisable(dev);
1622
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 /* Disable Rx and Tx */
1624 tempval = gfar_read(&regs->maccfg1);
1625 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1626 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001627}
1628
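/* Release the error, transmit and receive IRQ lines owned by one group. */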
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001629static void free_grp_irqs(struct gfar_priv_grp *grp)
1630{
1631 free_irq(grp->interruptError, grp);
1632 free_irq(grp->interruptTransmit, grp);
1633 free_irq(grp->interruptReceive, grp);
1634}
1635
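/* Stop the PHY, halt the controller under the queue locks, release the group
 * IRQs and free all skb and buffer descriptor resources.
 */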
Kumar Gala0bbaf062005-06-20 10:54:21 -05001636void stop_gfar(struct net_device *dev)
1637{
1638 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001639 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001640 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001641
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001642 phy_stop(priv->phydev);
 1643
Kumar Gala0bbaf062005-06-20 10:54:21 -05001645 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001646 local_irq_save(flags);
1647 lock_tx_qs(priv);
1648 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001649
Kumar Gala0bbaf062005-06-20 10:54:21 -05001650 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001652 unlock_rx_qs(priv);
1653 unlock_tx_qs(priv);
1654 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
1656 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001657 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001658 for (i = 0; i < priv->num_grps; i++)
1659 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001661 for (i = 0; i < priv->num_grps; i++)
1662 free_irq(priv->gfargrp[i].interruptTransmit,
1663 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 }
1665
1666 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667}
1668
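/* Unmap and free any sk_buffs still attached to a Tx queue's buffer
 * descriptors, then free the tx_skbuff pointer array itself.
 */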
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001669static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001672 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001673 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001675 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001677 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1678 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001679 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
Kumar Gala48268572009-03-18 23:28:22 -07001681 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001682 txbdp->length, DMA_TO_DEVICE);
1683 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001684 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1685 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001686 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001687 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001688 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001690 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001691 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1692 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001694 kfree(tx_queue->tx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001695}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
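/* Unmap and free any sk_buffs still attached to an Rx queue's buffer
 * descriptors, clear the descriptors and free the rx_skbuff pointer array.
 */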
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001697static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1698{
1699 struct rxbd8 *rxbdp;
1700 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1701 int i;
1702
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001703 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001705 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1706 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001707 dma_unmap_single(&priv->ofdev->dev,
1708 rxbdp->bufPtr, priv->rx_buffer_size,
Anton Vorontsove69edd22009-10-12 06:00:30 +00001709 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001710 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1711 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001713 rxbdp->lstatus = 0;
1714 rxbdp->bufPtr = 0;
1715 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001717 kfree(rx_queue->rx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001718}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001719
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001720/* If there are any tx skbs or rx skbs still around, free them.
1721 * Then free tx_skbuff and rx_skbuff */
1722static void free_skb_resources(struct gfar_private *priv)
1723{
1724 struct gfar_priv_tx_q *tx_queue = NULL;
1725 struct gfar_priv_rx_q *rx_queue = NULL;
1726 int i;
1727
1728 /* Go through all the buffer descriptors and free their data buffers */
1729 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001730 struct netdev_queue *txq;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001731 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001732 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001733		if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001734 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001735 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001736 }
1737
1738 for (i = 0; i < priv->num_rx_queues; i++) {
1739 rx_queue = priv->rx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001740		if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001741 free_skb_rx_queue(rx_queue);
1742 }
1743
1744 dma_free_coherent(&priv->ofdev->dev,
1745 sizeof(struct txbd8) * priv->total_tx_ring_size +
1746 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1747 priv->tx_queue[0]->tx_bd_base,
1748 priv->tx_queue[0]->tx_bd_dma_base);
Sebastian Andrzej Siewior7df9c432010-05-04 22:30:47 +00001749 skb_queue_purge(&priv->rx_recycle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750}
1751
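/* Re-enable Rx and Tx in MACCFG1, restart the DMA engine, clear the Tx/Rx
 * halt bits for every group and unmask the default interrupts.
 */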
Kumar Gala0bbaf062005-06-20 10:54:21 -05001752void gfar_start(struct net_device *dev)
1753{
1754 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001755 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001756 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001757 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001758
1759 /* Enable Rx and Tx in MACCFG1 */
1760 tempval = gfar_read(&regs->maccfg1);
1761 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1762 gfar_write(&regs->maccfg1, tempval);
1763
1764 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001765 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001766 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001767 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001768
Kumar Gala0bbaf062005-06-20 10:54:21 -05001769 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001770 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001771 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001772 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001773
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001774 for (i = 0; i < priv->num_grps; i++) {
1775 regs = priv->gfargrp[i].regs;
1776 /* Clear THLT/RHLT, so that the DMA starts polling now */
1777 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1778 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1779 /* Unmask the interrupts we look for */
1780 gfar_write(&regs->imask, IMASK_DEFAULT);
1781 }
Dai Haruki12dea572008-12-16 15:30:20 -08001782
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001783 dev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001784}
1785
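/* Program the interrupt coalescing registers. Queue 0's settings always go
 * into the legacy txic/rxic registers; in multi-group mode every queue
 * selected by tx_mask/rx_mask also gets its own per-queue txic/rxic register.
 */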
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001786void gfar_configure_coalescing(struct gfar_private *priv,
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001787 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001789 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001790 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001791 int i = 0;
1792
1793 /* Backward compatible case ---- even if we enable
 1794	 * multiple queues, there's only a single reg to program
1795 */
1796 gfar_write(&regs->txic, 0);
 1797	if (likely(priv->tx_queue[0]->txcoalescing))
1798 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1799
1800 gfar_write(&regs->rxic, 0);
 1801	if (unlikely(priv->rx_queue[0]->rxcoalescing))
1802 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1803
1804 if (priv->mode == MQ_MG_MODE) {
1805 baddr = &regs->txic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001806 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
Claudiu Manoil9740e002012-06-28 04:40:53 +00001807 gfar_write(baddr + i, 0);
1808 if (likely(priv->tx_queue[i]->txcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001809 gfar_write(baddr + i, priv->tx_queue[i]->txic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001810 }
1811
1812 baddr = &regs->rxic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001813 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
Claudiu Manoil9740e002012-06-28 04:40:53 +00001814 gfar_write(baddr + i, 0);
1815 if (likely(priv->rx_queue[i]->rxcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001816 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001817 }
1818 }
1819}
1820
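/* Request the IRQ lines for one interrupt group: separate error, transmit and
 * receive handlers when the controller provides multiple interrupts, otherwise
 * a single combined handler on the transmit line.
 */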
1821static int register_grp_irqs(struct gfar_priv_grp *grp)
1822{
1823 struct gfar_private *priv = grp->priv;
1824 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001825 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 /* If the device has multiple interrupts, register for
 1828	 * them. Otherwise, register only for the one. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001829 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001830 /* Install our interrupt handlers for Error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 * Transmit, and Receive */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001832 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1833 grp->int_name_er,grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001834 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1835 grp->interruptError);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001836
Julia Lawall2145f1a2010-08-05 10:26:20 +00001837 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 }
1839
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001840 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1841 0, grp->int_name_tx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001842 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1843 grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 goto tx_irq_fail;
1845 }
1846
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001847 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1848 grp->int_name_rx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001849 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1850 grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 goto rx_irq_fail;
1852 }
1853 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001854 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1855 grp->int_name_tx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001856 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1857 grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 goto err_irq_fail;
1859 }
1860 }
1861
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001862 return 0;
1863
1864rx_irq_fail:
1865 free_irq(grp->interruptTransmit, grp);
1866tx_irq_fail:
1867 free_irq(grp->interruptError, grp);
1868err_irq_fail:
1869 return err;
1870
1871}
1872
1873/* Bring the controller up and running */
1874int startup_gfar(struct net_device *ndev)
1875{
1876 struct gfar_private *priv = netdev_priv(ndev);
1877 struct gfar __iomem *regs = NULL;
1878 int err, i, j;
1879
1880 for (i = 0; i < priv->num_grps; i++) {
 1881		regs = priv->gfargrp[i].regs;
1882 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1883 }
1884
 1885	regs = priv->gfargrp[0].regs;
1886 err = gfar_alloc_skb_resources(ndev);
1887 if (err)
1888 return err;
1889
1890 gfar_init_mac(ndev);
1891
1892 for (i = 0; i < priv->num_grps; i++) {
1893 err = register_grp_irqs(&priv->gfargrp[i]);
1894 if (err) {
1895 for (j = 0; j < i; j++)
1896 free_grp_irqs(&priv->gfargrp[j]);
Anton Vorontsovff760152011-01-18 02:36:02 +00001897 goto irq_fail;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001898 }
1899 }
1900
Andy Fleming7f7f5312005-11-11 12:38:59 -06001901 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001902 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001904 phy_start(priv->phydev);
1905
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001906 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1907
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 return 0;
1909
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001910irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001911 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 return err;
1913}
1914
1915/* Called when something needs to use the ethernet device */
1916/* Returns 0 for success. */
1917static int gfar_enet_open(struct net_device *dev)
1918{
Li Yang94e8cc32007-10-12 21:53:51 +08001919 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 int err;
1921
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001922 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001923
Andy Fleming0fd56bb2009-02-04 16:43:16 -08001924 skb_queue_head_init(&priv->rx_recycle);
1925
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 /* Initialize a bunch of registers */
1927 init_registers(dev);
1928
1929 gfar_set_mac_address(dev);
1930
1931 err = init_phy(dev);
1932
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001933 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001934 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001936 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001939 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001940 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001941 return err;
1942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001944 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001946 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 return err;
1949}
1950
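/* Prepend a zeroed frame control block (FCB) to the skb and return it. */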
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001951static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001952{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001953 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001954
1955 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001956
Kumar Gala0bbaf062005-06-20 10:54:21 -05001957 return fcb;
1958}
1959
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00001960static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1961 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001962{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001963 u8 flags = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001964
 1965	/* If we're here, it's an IP packet with a TCP or UDP
1966 * payload. We set it to checksum, using a pseudo-header
1967 * we provide
1968 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001969 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001970
Andy Fleming7f7f5312005-11-11 12:38:59 -06001971 /* Tell the controller what the protocol is */
1972 /* And provide the already calculated phcs */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001973 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001974 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03001975 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06001976 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05001977 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001978
1979 /* l3os is the distance between the start of the
1980 * frame (skb->data) and the start of the IP hdr.
1981 * l4os is the distance between the start of the
1982 * l3 hdr and the l4 hdr */
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00001983 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03001984 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001985
Andy Fleming7f7f5312005-11-11 12:38:59 -06001986 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001987}
1988
Andy Fleming7f7f5312005-11-11 12:38:59 -06001989inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001990{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001991 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001992 fcb->vlctl = vlan_tx_tag_get(skb);
1993}
1994
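/* Advance a Tx buffer descriptor pointer by 'stride' entries, wrapping around
 * the end of the ring; next_txbd() below is the single-step case.
 */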
Dai Haruki4669bc92008-12-17 16:51:04 -08001995static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1996 struct txbd8 *base, int ring_size)
1997{
1998 struct txbd8 *new_bd = bdp + stride;
1999
2000 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2001}
2002
2003static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2004 int ring_size)
2005{
2006 return skip_txbd(bdp, 1, base, ring_size);
2007}
2008
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009/* This is called by the kernel when a frame is ready for transmission. */
2010/* It is pointed to by the dev->hard_start_xmit function pointer */
2011static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2012{
2013 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002014 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002015 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002016 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002017 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002018 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002019 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002020 int i, rq = 0, do_tstamp = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002021 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05002022 unsigned long flags;
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002023 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002024
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00002025 /*
2026 * TOE=1 frames larger than 2500 bytes may see excess delays
2027 * before start of transmission.
2028 */
2029 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2030 skb->ip_summed == CHECKSUM_PARTIAL &&
2031 skb->len > 2500)) {
2032 int ret;
2033
2034 ret = skb_checksum_help(skb);
2035 if (ret)
2036 return ret;
2037 }
2038
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002039 rq = skb->queue_mapping;
2040 tx_queue = priv->tx_queue[rq];
2041 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002042 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002043 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002044
2045 /* check if time stamp should be generated */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002046 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002047 priv->hwts_tx_en)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002048 do_tstamp = 1;
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002049 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2050 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002051
Li Yang5b28bea2009-03-27 15:54:30 -07002052 /* make space for additional header when fcb is needed */
2053 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
Jesse Grosseab6d182010-10-20 13:56:03 +00002054 vlan_tx_tag_present(skb) ||
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002055 unlikely(do_tstamp)) &&
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002056 (skb_headroom(skb) < fcb_length)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002057 struct sk_buff *skb_new;
2058
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002059 skb_new = skb_realloc_headroom(skb, fcb_length);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002060 if (!skb_new) {
2061 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07002062 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002063 return NETDEV_TX_OK;
2064 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002065
2066 /* Steal sock reference for processing TX time stamps */
2067 swap(skb_new->sk, skb->sk);
2068 swap(skb_new->destructor, skb->destructor);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002069 kfree_skb(skb);
2070 skb = skb_new;
2071 }
2072
Dai Haruki4669bc92008-12-17 16:51:04 -08002073 /* total number of fragments in the SKB */
2074 nr_frags = skb_shinfo(skb)->nr_frags;
2075
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002076 /* calculate the required number of TxBDs for this skb */
2077 if (unlikely(do_tstamp))
2078 nr_txbds = nr_frags + 2;
2079 else
2080 nr_txbds = nr_frags + 1;
2081
Dai Haruki4669bc92008-12-17 16:51:04 -08002082 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002083 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002084 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002085 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002086 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002087 return NETDEV_TX_BUSY;
2088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090 /* Update transmit stats */
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002091 tx_queue->stats.tx_bytes += skb->len;
2092 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002094 txbdp = txbdp_start = tx_queue->cur_tx;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002095 lstatus = txbdp->lstatus;
2096
2097 /* Time stamp insertion requires one additional TxBD */
2098 if (unlikely(do_tstamp))
2099 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2100 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
Dai Haruki4669bc92008-12-17 16:51:04 -08002102 if (nr_frags == 0) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002103 if (unlikely(do_tstamp))
2104 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2105 TXBD_INTERRUPT);
2106 else
2107 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Dai Haruki4669bc92008-12-17 16:51:04 -08002108 } else {
2109 /* Place the fragment addresses and lengths into the TxBDs */
2110 for (i = 0; i < nr_frags; i++) {
2111 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002112 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Dai Haruki4669bc92008-12-17 16:51:04 -08002114 length = skb_shinfo(skb)->frags[i].size;
2115
2116 lstatus = txbdp->lstatus | length |
2117 BD_LFLAG(TXBD_READY);
2118
2119 /* Handle the last BD specially */
2120 if (i == nr_frags - 1)
2121 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2122
Ian Campbell2234a722011-08-29 23:18:29 +00002123 bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2124 &skb_shinfo(skb)->frags[i],
2125 0,
2126 length,
2127 DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08002128
2129 /* set the TxBD length and buffer pointer */
2130 txbdp->bufPtr = bufaddr;
2131 txbdp->lstatus = lstatus;
2132 }
2133
2134 lstatus = txbdp_start->lstatus;
2135 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002137 /* Add TxPAL between FCB and frame if required */
2138 if (unlikely(do_tstamp)) {
2139 skb_push(skb, GMAC_TXPAL_LEN);
2140 memset(skb->data, 0, GMAC_TXPAL_LEN);
2141 }
2142
Kumar Gala0bbaf062005-06-20 10:54:21 -05002143 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08002144 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002145 fcb = gfar_add_fcb(skb);
Alex Dubov4363c2f2011-03-16 17:57:13 +00002146 /* as specified by errata */
2147 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2148 && ((unsigned long)fcb % 0x20) > 0x18)) {
2149 __skb_pull(skb, GMAC_FCB_LEN);
2150 skb_checksum_help(skb);
2151 } else {
2152 lstatus |= BD_LFLAG(TXBD_TOE);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002153 gfar_tx_checksum(skb, fcb, fcb_length);
Alex Dubov4363c2f2011-03-16 17:57:13 +00002154 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002155 }
2156
Jesse Grosseab6d182010-10-20 13:56:03 +00002157 if (vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002158 if (unlikely(NULL == fcb)) {
2159 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08002160 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002161 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002162
2163 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002164 }
2165
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002166 /* Setup tx hardware time stamping if requested */
2167 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002168 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002169 if (fcb == NULL)
2170 fcb = gfar_add_fcb(skb);
2171 fcb->ptp = 1;
2172 lstatus |= BD_LFLAG(TXBD_TOE);
2173 }
2174
Kumar Gala48268572009-03-18 23:28:22 -07002175 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Dai Haruki4669bc92008-12-17 16:51:04 -08002176 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002178 /*
 2179	 * If time stamping is requested, one additional TxBD must be set up. The
2180 * first TxBD points to the FCB and must have a data length of
2181 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2182 * the full frame length.
2183 */
2184 if (unlikely(do_tstamp)) {
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002185 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002186 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002187 (skb_headlen(skb) - fcb_length);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002188 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2189 } else {
2190 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002193 netdev_tx_sent_queue(txq, skb->len);
2194
Dai Haruki4669bc92008-12-17 16:51:04 -08002195 /*
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002196 * We can work in parallel with gfar_clean_tx_ring(), except
2197 * when modifying num_txbdfree. Note that we didn't grab the lock
 2198	 * while reading num_txbdfree and checking for available
 2199	 * space; that is because outside of this function it can only grow,
 2200	 * and once we've got the needed space, it cannot suddenly disappear.
2201 *
2202 * The lock also protects us from gfar_error(), which can modify
2203 * regs->tstat and thus retrigger the transfers, which is why we
2204 * also must grab the lock before setting ready bit for the first
2205 * to be transmitted BD.
2206 */
2207 spin_lock_irqsave(&tx_queue->txlock, flags);
2208
2209 /*
Dai Haruki4669bc92008-12-17 16:51:04 -08002210 * The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002211 * semantics (it requires synchronization between cacheable and
2212 * uncacheable mappings, which eieio doesn't provide and which we
2213 * don't need), thus requiring a more expensive sync instruction. At
2214 * some point, the set of architecture-independent barrier functions
2215 * should be expanded to include weaker barriers.
2216 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002217 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002218
Dai Haruki4669bc92008-12-17 16:51:04 -08002219 txbdp_start->lstatus = lstatus;
2220
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002221 eieio(); /* force lstatus write before tx_skbuff */
2222
2223 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2224
Dai Haruki4669bc92008-12-17 16:51:04 -08002225 /* Update the current skb pointer to the next entry we will use
2226 * (wrapping if necessary) */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002227 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2228 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002229
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002230 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002231
2232 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002233 tx_queue->num_txbdfree -= (nr_txbds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
2235 /* If the next BD still needs to be cleaned up, then the bds
 2236	 * are full. We need to tell the kernel to stop sending us stuff. */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002237 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002238 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002240 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 }
2242
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002244 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
2246 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002247 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002249 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250}
2251
2252/* Stops the kernel queue, and halts the controller */
2253static int gfar_close(struct net_device *dev)
2254{
2255 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002256
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002257 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002258
Sebastian Siewiorab939902008-08-19 21:12:45 +02002259 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 stop_gfar(dev);
2261
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002262 /* Disconnect from the PHY */
2263 phy_disconnect(priv->phydev);
2264 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002266 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
2268 return 0;
2269}
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002272static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002274 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
2276 return 0;
2277}
2278
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002279/* Check if rx parser should be activated */
2280void gfar_check_rx_parser_mode(struct gfar_private *priv)
2281{
2282 struct gfar __iomem *regs;
2283 u32 tempval;
2284
2285 regs = priv->gfargrp[0].regs;
2286
2287 tempval = gfar_read(&regs->rctrl);
2288 /* If parse is no longer required, then disable parser */
2289 if (tempval & RCTRL_REQ_PARSER)
2290 tempval |= RCTRL_PRSDEP_INIT;
2291 else
2292 tempval &= ~RCTRL_PRSDEP_INIT;
2293 gfar_write(&regs->rctrl, tempval);
2294}
2295
Kumar Gala0bbaf062005-06-20 10:54:21 -05002296/* Enables and disables VLAN insertion/extraction */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002297void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002298{
2299 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002300 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002301 unsigned long flags;
2302 u32 tempval;
2303
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002304 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002305 local_irq_save(flags);
2306 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002307
Jiri Pirko87c288c2011-07-20 04:54:19 +00002308 if (features & NETIF_F_HW_VLAN_TX) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002309 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002310 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002311 tempval |= TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002312 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002313 } else {
2314 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002315 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002316 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002317 gfar_write(&regs->tctrl, tempval);
Jiri Pirko87c288c2011-07-20 04:54:19 +00002318 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002319
Jiri Pirko87c288c2011-07-20 04:54:19 +00002320 if (features & NETIF_F_HW_VLAN_RX) {
2321 /* Enable VLAN tag extraction */
2322 tempval = gfar_read(&regs->rctrl);
2323 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2324 gfar_write(&regs->rctrl, tempval);
2325 } else {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002326 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002327 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002328 tempval &= ~RCTRL_VLEX;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002329 gfar_write(&regs->rctrl, tempval);
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002330
2331 gfar_check_rx_parser_mode(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002332 }
2333
Dai Haruki77ecaf22008-12-16 15:30:48 -08002334 gfar_change_mtu(dev, dev->mtu);
2335
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002336 unlock_rx_qs(priv);
2337 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002338}
2339
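/* Validate the new MTU, resize the Rx buffers (stopping and restarting the
 * controller if it is up) and update the huge-frame and length-check bits in
 * MACCFG2.
 */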
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2341{
2342 int tempsize, tempval;
2343 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002344 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002346 int frame_size = new_mtu + ETH_HLEN;
2347
Jiri Pirko87c288c2011-07-20 04:54:19 +00002348 if (gfar_is_vlan_on(priv))
Dai Harukifaa89572008-03-24 10:53:26 -05002349 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002350
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Joe Perches59deab22011-06-14 08:57:47 +00002352 netif_err(priv, drv, dev, "Invalid MTU setting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 return -EINVAL;
2354 }
2355
Dai Haruki77ecaf22008-12-16 15:30:48 -08002356 if (gfar_uses_fcb(priv))
2357 frame_size += GMAC_FCB_LEN;
2358
2359 frame_size += priv->padding;
2360
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 tempsize =
2362 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2363 INCREMENTAL_BUFFER_SIZE;
2364
2365 /* Only stop and start the controller if it isn't already
Andy Fleming7f7f5312005-11-11 12:38:59 -06002366 * stopped, and we changed something */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2368 stop_gfar(dev);
2369
2370 priv->rx_buffer_size = tempsize;
2371
2372 dev->mtu = new_mtu;
2373
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002374 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2375 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
2377 /* If the mtu is larger than the max size for standard
2378 * ethernet frames (ie, a jumbo frame), then set maccfg2
2379 * to allow huge frames, and to check the length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002380 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Anton Vorontsov7d350972010-06-30 06:39:12 +00002382 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2383 gfar_has_errata(priv, GFAR_ERRATA_74))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2385 else
2386 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2387
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002388 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389
2390 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2391 startup_gfar(dev);
2392
2393 return 0;
2394}
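
/* A worked example of the rx buffer sizing above, assuming the usual
 * gianfar.h values of ETH_HLEN 14, VLAN_HLEN 4, GMAC_FCB_LEN 8 and
 * INCREMENTAL_BUFFER_SIZE 512: an MTU of 1500 on a VLAN with FCBs in use
 * and no extra padding gives
 *   frame_size = 1500 + 14 + 4 + 8 = 1526
 *   tempsize   = (1526 & ~511) + 512 = 1024 + 512 = 1536
 * Note that the expression always adds one full increment, so a frame_size
 * that is already a multiple of 512 still rounds up to the next multiple.
 */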
2395
Sebastian Siewiorab939902008-08-19 21:12:45 +02002396/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 * transmitted after a set amount of time.
 2398 * For now, assume that clearing out all the structures and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002399 * starting over will fix the problem.
2400 */
2401static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002403 struct gfar_private *priv = container_of(work, struct gfar_private,
2404 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002405 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
2407 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002408 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 stop_gfar(dev);
2410 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002411 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 }
2413
David S. Miller263ba322008-07-15 03:47:41 -07002414 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415}
2416
Sebastian Siewiorab939902008-08-19 21:12:45 +02002417static void gfar_timeout(struct net_device *dev)
2418{
2419 struct gfar_private *priv = netdev_priv(dev);
2420
2421 dev->stats.tx_errors++;
2422 schedule_work(&priv->reset_task);
2423}
2424
Eran Libertyacbc0f02010-07-07 15:54:54 -07002425static void gfar_align_skb(struct sk_buff *skb)
2426{
2427 /* We need the data buffer to be aligned properly. We will reserve
2428 * as many bytes as needed to align the data properly
2429 */
2430 skb_reserve(skb, RXBUF_ALIGNMENT -
2431 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2432}
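
/* For example, assuming RXBUF_ALIGNMENT is 64 (the value in gianfar.h):
 * if skb->data lands on an address whose low six bits are 0x28 (40), the
 * reserve above is 64 - 40 = 24 bytes, moving skb->data up to the next
 * 64-byte boundary. A buffer that is already aligned gets a full 64-byte
 * reserve, which is why gfar_alloc_skb() below allocates RXBUF_ALIGNMENT
 * extra bytes of headroom.
 */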
2433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434/* Reclaim resources from completed transmits; called from the NAPI poll loop */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002435static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002437 struct net_device *dev = tx_queue->dev;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002438 struct netdev_queue *txq;
Dai Harukid080cd62008-04-09 19:37:51 -05002439 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002440 struct gfar_priv_rx_q *rx_queue = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002441 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002442 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002443 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002444 struct sk_buff *skb;
2445 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002446 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002447 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002448 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002449 int howmany = 0;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002450 int tqi = tx_queue->qindex;
2451 unsigned int bytes_sent = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002452 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002453 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002455 rx_queue = priv->rx_queue[tqi];
2456 txq = netdev_get_tx_queue(dev, tqi);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002457 bdp = tx_queue->dirty_tx;
2458 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002459
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002460 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002461 unsigned long flags;
2462
Dai Haruki4669bc92008-12-17 16:51:04 -08002463 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002464
2465 /*
2466 * When time stamping, one additional TxBD must be freed.
2467 * Also, we need to dma_unmap_single() the TxPAL.
2468 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002469 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002470 nr_txbds = frags + 2;
2471 else
2472 nr_txbds = frags + 1;
2473
2474 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002475
2476 lstatus = lbdp->lstatus;
2477
2478 /* Only clean completed frames */
2479 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2480 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 break;
2482
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002483 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002484 next = next_txbd(bdp, base, tx_ring_size);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002485 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002486 } else
2487 buflen = bdp->length;
2488
2489 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2490 buflen, DMA_TO_DEVICE);
2491
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002492 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002493 struct skb_shared_hwtstamps shhwtstamps;
2494 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2495 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2496 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002497 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002498 skb_tstamp_tx(skb, &shhwtstamps);
2499 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2500 bdp = next;
2501 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002502
2503 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2504 bdp = next_txbd(bdp, base, tx_ring_size);
2505
2506 for (i = 0; i < frags; i++) {
Kumar Gala48268572009-03-18 23:28:22 -07002507 dma_unmap_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002508 bdp->bufPtr,
2509 bdp->length,
2510 DMA_TO_DEVICE);
2511 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2512 bdp = next_txbd(bdp, base, tx_ring_size);
2513 }
2514
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002515 bytes_sent += skb->len;
2516
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002517 /*
 2518 * If there's room in the recycle queue (capped at the rx ring size) and
 2519 * the skb is big enough to hold a full rx buffer, add it back to the pool
 2520 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002521 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002522 skb_recycle_check(skb, priv->rx_buffer_size +
Eran Libertyacbc0f02010-07-07 15:54:54 -07002523 RXBUF_ALIGNMENT)) {
2524 gfar_align_skb(skb);
Jarek Poplawskicd0ea242010-10-19 00:06:36 +00002525 skb_queue_head(&priv->rx_recycle, skb);
Eran Libertyacbc0f02010-07-07 15:54:54 -07002526 } else
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002527 dev_kfree_skb_any(skb);
2528
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002529 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002530
2531 skb_dirtytx = (skb_dirtytx + 1) &
2532 TX_RING_MOD_MASK(tx_ring_size);
2533
Dai Harukid080cd62008-04-09 19:37:51 -05002534 howmany++;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002535 spin_lock_irqsave(&tx_queue->txlock, flags);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002536 tx_queue->num_txbdfree += nr_txbds;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002537 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Dai Haruki4669bc92008-12-17 16:51:04 -08002538 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
Dai Haruki4669bc92008-12-17 16:51:04 -08002540 /* If we freed a buffer, we can restart transmission, if necessary */
Paul Gortmaker5407b14c2012-03-18 17:11:22 -04002541 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002542 netif_wake_subqueue(dev, tqi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Dai Haruki4669bc92008-12-17 16:51:04 -08002544 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002545 tx_queue->skb_dirtytx = skb_dirtytx;
2546 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002548 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2549
Dai Harukid080cd62008-04-09 19:37:51 -05002550 return howmany;
2551}
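
/* A small worked example of the descriptor accounting above: an skb with
 * two page fragments normally occupies nr_txbds = 2 + 1 = 3 BDs (one for
 * the linear part plus one per fragment); if it was hardware time stamped
 * one extra BD is in use, so 4 BDs are cleaned and credited back to
 * num_txbdfree for that skb.
 */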
2552
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002553static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
Dai Haruki8c7396a2008-12-17 16:52:00 -08002554{
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002555 unsigned long flags;
2556
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002557 spin_lock_irqsave(&gfargrp->grplock, flags);
2558 if (napi_schedule_prep(&gfargrp->napi)) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002559 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002560 __napi_schedule(&gfargrp->napi);
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002561 } else {
2562 /*
 2563 * Clear IEVENT, so the interrupt isn't raised again
 2564 * for the packets that have already arrived.
2565 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002566 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002567 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002568 spin_unlock_irqrestore(&gfargrp->grplock, flags);
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002569
Dai Haruki8c7396a2008-12-17 16:52:00 -08002570}
2571
Dai Harukid080cd62008-04-09 19:37:51 -05002572/* Interrupt Handler for Transmit complete */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002573static irqreturn_t gfar_transmit(int irq, void *grp_id)
Dai Harukid080cd62008-04-09 19:37:51 -05002574{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002575 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 return IRQ_HANDLED;
2577}
2578
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002579static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Andy Fleming815b97c2008-04-22 17:18:29 -05002580 struct sk_buff *skb)
2581{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002582 struct net_device *dev = rx_queue->dev;
Andy Fleming815b97c2008-04-22 17:18:29 -05002583 struct gfar_private *priv = netdev_priv(dev);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002584 dma_addr_t buf;
Andy Fleming815b97c2008-04-22 17:18:29 -05002585
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002586 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2587 priv->rx_buffer_size, DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002588 gfar_init_rxbdp(rx_queue, bdp, buf);
Andy Fleming815b97c2008-04-22 17:18:29 -05002589}
2590
Eran Libertyacbc0f02010-07-07 15:54:54 -07002591static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
2592{
2593 struct gfar_private *priv = netdev_priv(dev);
2594 struct sk_buff *skb = NULL;
2595
2596 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2597 if (!skb)
2598 return NULL;
2599
2600 gfar_align_skb(skb);
2601
2602 return skb;
2603}
Andy Fleming815b97c2008-04-22 17:18:29 -05002604
2605struct sk_buff * gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606{
2607 struct gfar_private *priv = netdev_priv(dev);
2608 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
Jarek Poplawskicd0ea242010-10-19 00:06:36 +00002610 skb = skb_dequeue(&priv->rx_recycle);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002611 if (!skb)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002612 skb = gfar_alloc_skb(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 return skb;
2615}
2616
Li Yang298e1a92007-10-16 14:18:13 +08002617static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618{
Li Yang298e1a92007-10-16 14:18:13 +08002619 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002620 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 struct gfar_extra_stats *estats = &priv->extra_stats;
2622
2623 /* If the packet was truncated, none of the other errors
2624 * matter */
2625 if (status & RXBD_TRUNCATED) {
2626 stats->rx_length_errors++;
2627
2628 estats->rx_trunc++;
2629
2630 return;
2631 }
2632 /* Count the errors, if there were any */
2633 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2634 stats->rx_length_errors++;
2635
2636 if (status & RXBD_LARGE)
2637 estats->rx_large++;
2638 else
2639 estats->rx_short++;
2640 }
2641 if (status & RXBD_NONOCTET) {
2642 stats->rx_frame_errors++;
2643 estats->rx_nonoctet++;
2644 }
2645 if (status & RXBD_CRCERR) {
2646 estats->rx_crcerr++;
2647 stats->rx_crc_errors++;
2648 }
2649 if (status & RXBD_OVERRUN) {
2650 estats->rx_overrun++;
2651 stats->rx_crc_errors++;
2652 }
2653}
2654
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002655irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002657 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 return IRQ_HANDLED;
2659}
2660
Kumar Gala0bbaf062005-06-20 10:54:21 -05002661static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2662{
 2663 /* If the hardware found valid headers and verified the checksums,
 2664 * tell the kernel that no further checksumming is necessary.
 2665 * Otherwise, leave the checksum unverified so the stack checks it */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002666 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05002667 skb->ip_summed = CHECKSUM_UNNECESSARY;
2668 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002669 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002670}
2671
2672
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673/* gfar_process_frame() -- handle one incoming packet if skb
2674 * isn't NULL. */
2675static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002676 int amount_pull, struct napi_struct *napi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677{
2678 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002679 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002681 gro_result_t ret;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002682
Dai Haruki2c2db482008-12-16 15:31:15 -08002683 /* fcb is at the beginning of the data, if it exists */
2684 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685
Dai Haruki2c2db482008-12-16 15:31:15 -08002686 /* Remove the FCB from the skb */
2687 /* Remove the padded bytes, if there are any */
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002688 if (amount_pull) {
2689 skb_record_rx_queue(skb, fcb->rq);
Dai Haruki2c2db482008-12-16 15:31:15 -08002690 skb_pull(skb, amount_pull);
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002691 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002692
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002693 /* Get receive timestamp from the skb */
2694 if (priv->hwts_rx_en) {
2695 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2696 u64 *ns = (u64 *) skb->data;
2697 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2698 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2699 }
2700
2701 if (priv->padding)
2702 skb_pull(skb, priv->padding);
2703
Michał Mirosław8b3afe92011-04-15 04:50:50 +00002704 if (dev->features & NETIF_F_RXCSUM)
Dai Haruki2c2db482008-12-16 15:31:15 -08002705 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002706
Dai Haruki2c2db482008-12-16 15:31:15 -08002707 /* Tell the skb what kind of packet this is */
2708 skb->protocol = eth_type_trans(skb, dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002709
David S. Miller823dcd22011-08-20 10:39:12 -07002710 /*
 2711 * There is a need to check for NETIF_F_HW_VLAN_RX here.
 2712 * Even if vlan rx acceleration is disabled, on some chips
 2713 * RXFCB_VLN is set pseudo-randomly.
2714 */
2715 if (dev->features & NETIF_F_HW_VLAN_RX &&
2716 fcb->flags & RXFCB_VLN)
Jiri Pirko87c288c2011-07-20 04:54:19 +00002717 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2718
Dai Haruki2c2db482008-12-16 15:31:15 -08002719 /* Send the packet up the stack */
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002720 ret = napi_gro_receive(napi, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002722 if (GRO_DROP == ret)
Dai Haruki2c2db482008-12-16 15:31:15 -08002723 priv->extra_stats.kernel_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724
2725 return 0;
2726}
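
/* For example, when FCBs are in use amount_pull is GMAC_FCB_LEN, so the
 * frame control block is stripped first; with hardware rx time stamping
 * enabled the next eight bytes of the buffer hold the timestamp read
 * above, and whatever priv->padding specifies is then pulled before the
 * frame is handed to the stack.
 */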
2727
2728/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Kumar Gala0bbaf062005-06-20 10:54:21 -05002729 * until the budget/quota has been reached. Returns the number
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 * of frames handled
2731 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002732int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002734 struct net_device *dev = rx_queue->dev;
Andy Fleming31de1982008-12-16 15:33:40 -08002735 struct rxbd8 *bdp, *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 struct sk_buff *skb;
Dai Haruki2c2db482008-12-16 15:31:15 -08002737 int pkt_len;
2738 int amount_pull;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 int howmany = 0;
2740 struct gfar_private *priv = netdev_priv(dev);
2741
2742 /* Get the first full descriptor */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002743 bdp = rx_queue->cur_rx;
2744 base = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002746 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
Dai Haruki2c2db482008-12-16 15:31:15 -08002747
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002749 struct sk_buff *newskb;
Scott Wood3b6330c2007-05-16 15:06:59 -05002750 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05002751
2752 /* Add another skb for the future */
2753 newskb = gfar_new_skb(dev);
2754
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002755 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756
Kumar Gala48268572009-03-18 23:28:22 -07002757 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
Andy Fleming81183052008-11-12 10:07:11 -06002758 priv->rx_buffer_size, DMA_FROM_DEVICE);
2759
Anton Vorontsov63b88b92010-06-11 10:51:03 +00002760 if (unlikely(!(bdp->status & RXBD_ERR) &&
2761 bdp->length > priv->rx_buffer_size))
2762 bdp->status = RXBD_LARGE;
2763
Andy Fleming815b97c2008-04-22 17:18:29 -05002764 /* We drop the frame if we failed to allocate a new buffer */
2765 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2766 bdp->status & RXBD_ERR)) {
2767 count_errors(bdp->status, dev);
2768
2769 if (unlikely(!newskb))
2770 newskb = skb;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002771 else if (skb)
Jarek Poplawskicd0ea242010-10-19 00:06:36 +00002772 skb_queue_head(&priv->rx_recycle, skb);
Andy Fleming815b97c2008-04-22 17:18:29 -05002773 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 /* Increment the number of packets */
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002775 rx_queue->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 howmany++;
2777
Dai Haruki2c2db482008-12-16 15:31:15 -08002778 if (likely(skb)) {
2779 pkt_len = bdp->length - ETH_FCS_LEN;
2780 /* Remove the FCS from the packet length */
2781 skb_put(skb, pkt_len);
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002782 rx_queue->stats.rx_bytes += pkt_len;
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002783 skb_record_rx_queue(skb, rx_queue->qindex);
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002784 gfar_process_frame(dev, skb, amount_pull,
2785 &rx_queue->grp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786
Dai Haruki2c2db482008-12-16 15:31:15 -08002787 } else {
Joe Perches59deab22011-06-14 08:57:47 +00002788 netif_warn(priv, rx_err, dev, "Missing skb!\n");
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002789 rx_queue->stats.rx_dropped++;
Dai Haruki2c2db482008-12-16 15:31:15 -08002790 priv->extra_stats.rx_skbmissing++;
2791 }
2792
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 }
2794
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002795 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796
Andy Fleming815b97c2008-04-22 17:18:29 -05002797 /* Setup the new bdp */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002798 gfar_new_rxbdp(rx_queue, bdp, newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
2800 /* Update to the next pointer */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002801 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
2803 /* update to point at the next skb */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002804 rx_queue->skb_currx =
2805 (rx_queue->skb_currx + 1) &
2806 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 }
2808
2809 /* Update the current rxbd pointer to be the next one */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002810 rx_queue->cur_rx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 return howmany;
2813}
2814
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002815static int gfar_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002817 struct gfar_priv_grp *gfargrp = container_of(napi,
2818 struct gfar_priv_grp, napi);
2819 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002820 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002821 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002822 struct gfar_priv_rx_q *rx_queue = NULL;
2823 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00002824 int tx_cleaned = 0, i, left_over_budget = budget;
2825 unsigned long serviced_queues = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002826 int num_queues = 0;
Dai Harukid080cd62008-04-09 19:37:51 -05002827
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002828 num_queues = gfargrp->num_rx_queues;
2829 budget_per_queue = budget/num_queues;
2830
Dai Haruki8c7396a2008-12-17 16:52:00 -08002831 /* Clear IEVENT, so the interrupt isn't raised again
 2832 * for the packets that have already arrived */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002833 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002834
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002835 while (num_queues && left_over_budget) {
2836
2837 budget_per_queue = left_over_budget/num_queues;
2838 left_over_budget = 0;
2839
Akinobu Mita984b3f52010-03-05 13:41:37 -08002840 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002841 if (test_bit(i, &serviced_queues))
2842 continue;
2843 rx_queue = priv->rx_queue[i];
2844 tx_queue = priv->tx_queue[rx_queue->qindex];
2845
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002846 tx_cleaned += gfar_clean_tx_ring(tx_queue);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002847 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2848 budget_per_queue);
2849 rx_cleaned += rx_cleaned_per_queue;
2850 if(rx_cleaned_per_queue < budget_per_queue) {
2851 left_over_budget = left_over_budget +
2852 (budget_per_queue - rx_cleaned_per_queue);
2853 set_bit(i, &serviced_queues);
2854 num_queues--;
2855 }
2856 }
Dai Harukid080cd62008-04-09 19:37:51 -05002857 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858
Andy Fleming42199882008-12-17 16:52:30 -08002859 if (tx_cleaned)
2860 return budget;
2861
2862 if (rx_cleaned < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002863 napi_complete(napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864
2865 /* Clear the halt bit in RSTAT */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002866 gfar_write(&regs->rstat, gfargrp->rstat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002868 gfar_write(&regs->imask, IMASK_DEFAULT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
2870 /* If we are coalescing interrupts, update the timer */
2871 /* Otherwise, clear it */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002872 gfar_configure_coalescing(priv,
2873 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 }
2875
Andy Fleming42199882008-12-17 16:52:30 -08002876 return rx_cleaned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877}
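
/* A worked example of the budget sharing above: with budget = 64 and two
 * rx queues in the group, each queue is first offered 32 frames of the
 * budget. A queue that uses fewer than its share is marked in
 * serviced_queues and its unused portion is added to left_over_budget,
 * which the outer while loop then redistributes among the queues that
 * exhausted their share.
 */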
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002879#ifdef CONFIG_NET_POLL_CONTROLLER
2880/*
2881 * Polling 'interrupt' - used by things like netconsole to send skbs
2882 * without having to re-enable interrupts. It's not called while
2883 * the interrupt routine is executing.
2884 */
2885static void gfar_netpoll(struct net_device *dev)
2886{
2887 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002888 int i = 0;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002889
2890 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002891 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002892 for (i = 0; i < priv->num_grps; i++) {
2893 disable_irq(priv->gfargrp[i].interruptTransmit);
2894 disable_irq(priv->gfargrp[i].interruptReceive);
2895 disable_irq(priv->gfargrp[i].interruptError);
2896 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2897 &priv->gfargrp[i]);
2898 enable_irq(priv->gfargrp[i].interruptError);
2899 enable_irq(priv->gfargrp[i].interruptReceive);
2900 enable_irq(priv->gfargrp[i].interruptTransmit);
2901 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002902 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002903 for (i = 0; i < priv->num_grps; i++) {
2904 disable_irq(priv->gfargrp[i].interruptTransmit);
2905 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2906 &priv->gfargrp[i]);
2907 enable_irq(priv->gfargrp[i].interruptTransmit);
Anton Vorontsov43de0042009-12-09 02:52:19 -08002908 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002909 }
2910}
2911#endif
2912
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002914static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002916 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917
2918 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002919 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002922 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002923 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
2925 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002926 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002927 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002929 /* Check for errors */
2930 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002931 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932
2933 return IRQ_HANDLED;
2934}
2935
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936/* Called every time the controller might need to be made
2937 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002938 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939 * function converts those variables into the appropriate
2940 * register values, and can bring down the device if needed.
2941 */
2942static void adjust_link(struct net_device *dev)
2943{
2944 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002945 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002946 unsigned long flags;
2947 struct phy_device *phydev = priv->phydev;
2948 int new_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002950 local_irq_save(flags);
2951 lock_tx_qs(priv);
2952
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002953 if (phydev->link) {
2954 u32 tempval = gfar_read(&regs->maccfg2);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002955 u32 ecntrl = gfar_read(&regs->ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002956
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 /* Now we make sure that we can be in full duplex mode.
2958 * If not, we operate in half-duplex mode. */
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002959 if (phydev->duplex != priv->oldduplex) {
2960 new_state = 1;
2961 if (!(phydev->duplex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 tempval &= ~(MACCFG2_FULL_DUPLEX);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002963 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 tempval |= MACCFG2_FULL_DUPLEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002966 priv->oldduplex = phydev->duplex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 }
2968
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002969 if (phydev->speed != priv->oldspeed) {
2970 new_state = 1;
2971 switch (phydev->speed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 case 1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973 tempval =
2974 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
Li Yangf430e492009-01-06 14:08:10 -08002975
2976 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977 break;
2978 case 100:
2979 case 10:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 tempval =
2981 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002982
2983 /* Reduced mode distinguishes
2984 * between 10 and 100 */
2985 if (phydev->speed == SPEED_100)
2986 ecntrl |= ECNTRL_R100;
2987 else
2988 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 break;
2990 default:
Joe Perches59deab22011-06-14 08:57:47 +00002991 netif_warn(priv, link, dev,
2992 "Ack! Speed (%d) is not 10/100/1000!\n",
2993 phydev->speed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 break;
2995 }
2996
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002997 priv->oldspeed = phydev->speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 }
2999
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003000 gfar_write(&regs->maccfg2, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003001 gfar_write(&regs->ecntrl, ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003002
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 if (!priv->oldlink) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003004 new_state = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 priv->oldlink = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 }
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003007 } else if (priv->oldlink) {
3008 new_state = 1;
3009 priv->oldlink = 0;
3010 priv->oldspeed = 0;
3011 priv->oldduplex = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003014 if (new_state && netif_msg_link(priv))
3015 phy_print_status(phydev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003016 unlock_tx_qs(priv);
3017 local_irq_restore(flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003018}
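
/* For example, when the PHY reports 100 Mbit/s full duplex the code above
 * sets MACCFG2_FULL_DUPLEX, selects the MII interface width with
 * MACCFG2_MII and sets ECNTRL_R100 so the reduced interface runs at 100
 * rather than 10 Mbit/s; on link loss only the old link/speed/duplex
 * bookkeeping is reset, so the next link-up reprograms MACCFG2 and ECNTRL.
 */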
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019
3020/* Update the hash table based on the current list of multicast
3021 * addresses we subscribe to. Also, change the promiscuity of
3022 * the device based on the flags (this function is called
 3023 * whenever dev->flags is changed) */
3024static void gfar_set_multi(struct net_device *dev)
3025{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003026 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003028 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 u32 tempval;
3030
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003031 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 /* Set RCTRL to PROM */
3033 tempval = gfar_read(&regs->rctrl);
3034 tempval |= RCTRL_PROM;
3035 gfar_write(&regs->rctrl, tempval);
3036 } else {
3037 /* Set RCTRL to not PROM */
3038 tempval = gfar_read(&regs->rctrl);
3039 tempval &= ~(RCTRL_PROM);
3040 gfar_write(&regs->rctrl, tempval);
3041 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003042
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003043 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003045 gfar_write(&regs->igaddr0, 0xffffffff);
3046 gfar_write(&regs->igaddr1, 0xffffffff);
3047 gfar_write(&regs->igaddr2, 0xffffffff);
3048 gfar_write(&regs->igaddr3, 0xffffffff);
3049 gfar_write(&regs->igaddr4, 0xffffffff);
3050 gfar_write(&regs->igaddr5, 0xffffffff);
3051 gfar_write(&regs->igaddr6, 0xffffffff);
3052 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 gfar_write(&regs->gaddr0, 0xffffffff);
3054 gfar_write(&regs->gaddr1, 0xffffffff);
3055 gfar_write(&regs->gaddr2, 0xffffffff);
3056 gfar_write(&regs->gaddr3, 0xffffffff);
3057 gfar_write(&regs->gaddr4, 0xffffffff);
3058 gfar_write(&regs->gaddr5, 0xffffffff);
3059 gfar_write(&regs->gaddr6, 0xffffffff);
3060 gfar_write(&regs->gaddr7, 0xffffffff);
3061 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003062 int em_num;
3063 int idx;
3064
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003066 gfar_write(&regs->igaddr0, 0x0);
3067 gfar_write(&regs->igaddr1, 0x0);
3068 gfar_write(&regs->igaddr2, 0x0);
3069 gfar_write(&regs->igaddr3, 0x0);
3070 gfar_write(&regs->igaddr4, 0x0);
3071 gfar_write(&regs->igaddr5, 0x0);
3072 gfar_write(&regs->igaddr6, 0x0);
3073 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 gfar_write(&regs->gaddr0, 0x0);
3075 gfar_write(&regs->gaddr1, 0x0);
3076 gfar_write(&regs->gaddr2, 0x0);
3077 gfar_write(&regs->gaddr3, 0x0);
3078 gfar_write(&regs->gaddr4, 0x0);
3079 gfar_write(&regs->gaddr5, 0x0);
3080 gfar_write(&regs->gaddr6, 0x0);
3081 gfar_write(&regs->gaddr7, 0x0);
3082
Andy Fleming7f7f5312005-11-11 12:38:59 -06003083 /* If we have extended hash tables, we need to
3084 * clear the exact match registers to prepare for
3085 * setting them */
3086 if (priv->extended_hash) {
3087 em_num = GFAR_EM_NUM + 1;
3088 gfar_clear_exact_match(dev);
3089 idx = 1;
3090 } else {
3091 idx = 0;
3092 em_num = 0;
3093 }
3094
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003095 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 return;
3097
3098 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003099 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003100 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003101 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003102 idx++;
3103 } else
Jiri Pirko22bedad32010-04-01 21:22:57 +00003104 gfar_set_hash_for_addr(dev, ha->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 }
3106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107}
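
/* For example, on a controller with extended hash support em_num is
 * GFAR_EM_NUM + 1, so the first GFAR_EM_NUM multicast addresses are
 * written into exact-match MAC address registers starting at index 1 and
 * any further addresses fall back to the group hash table via
 * gfar_set_hash_for_addr().
 */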
3108
Andy Fleming7f7f5312005-11-11 12:38:59 -06003109
3110/* Clears each of the exact match registers to zero, so they
3111 * don't interfere with normal reception */
3112static void gfar_clear_exact_match(struct net_device *dev)
3113{
3114 int idx;
Joe Perches6a3c9102011-11-16 09:38:02 +00003115 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
Andy Fleming7f7f5312005-11-11 12:38:59 -06003116
3117 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
Joe Perchesb6bc7652010-12-21 02:16:08 -08003118 gfar_set_mac_for_addr(dev, idx, zero_arr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003119}
3120
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121/* Set the appropriate hash bit for the given addr */
3122/* The algorithm works like so:
3123 * 1) Take the Destination Address (ie the multicast address), and
3124 * do a CRC on it (little endian), and reverse the bits of the
3125 * result.
3126 * 2) Use the 8 most significant bits as a hash into a 256-entry
3127 * table. The table is controlled through 8 32-bit registers:
3128 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 3129 * entry 255. This means that the 3 most significant bits in the
 3130 * hash index select which gaddr register to use, and the 5 other bits
3131 * indicate which bit (assuming an IBM numbering scheme, which
3132 * for PowerPC (tm) is usually the case) in the register holds
3133 * the entry. */
3134static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3135{
3136 u32 tempval;
3137 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c9102011-11-16 09:38:02 +00003138 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003139 int width = priv->hash_width;
3140 u8 whichbit = (result >> (32 - width)) & 0x1f;
3141 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 u32 value = (1 << (31-whichbit));
3143
Kumar Gala0bbaf062005-06-20 10:54:21 -05003144 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003146 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147}
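
/* A worked example of the decomposition above for the 256-entry table
 * (hash_width == 8): a CRC whose top byte is 0xa7 gives hash index
 * 0xa7 = 0b10100111, so
 *   whichreg = 0xa7 >> 5   = 5  -> priv->hash_regs[5]
 *   whichbit = 0xa7 & 0x1f = 7  -> value = 1 << (31 - 7)
 * i.e. bit 7 in IBM numbering (bit 24 counting from the least significant
 * bit) of that register is set.
 */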
3148
Andy Fleming7f7f5312005-11-11 12:38:59 -06003149
3150/* There are multiple MAC Address register pairs on some controllers
 3151 * This function sets the num'th pair to a given address
3152 */
Joe Perchesb6bc7652010-12-21 02:16:08 -08003153static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3154 const u8 *addr)
Andy Fleming7f7f5312005-11-11 12:38:59 -06003155{
3156 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003157 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003158 int idx;
Joe Perches6a3c9102011-11-16 09:38:02 +00003159 char tmpbuf[ETH_ALEN];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003160 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003161 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003162
3163 macptr += num*2;
3164
3165 /* Now copy it into the mac registers backwards, cuz */
3166 /* little endian is silly */
Joe Perches6a3c9102011-11-16 09:38:02 +00003167 for (idx = 0; idx < ETH_ALEN; idx++)
3168 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003169
3170 gfar_write(macptr, *((u32 *) (tmpbuf)));
3171
3172 tempval = *((u32 *) (tmpbuf + 4));
3173
3174 gfar_write(macptr+1, tempval);
3175}
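
/* For example, for addr = 00:04:9f:ab:cd:ef the loop above builds
 * tmpbuf = { 0xef, 0xcd, 0xab, 0x9f, 0x04, 0x00 }, so the station address
 * is programmed into the two registers in reversed byte order.
 */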
3176
Linus Torvalds1da177e2005-04-16 15:20:36 -07003177/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003178static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003180 struct gfar_priv_grp *gfargrp = grp_id;
3181 struct gfar __iomem *regs = gfargrp->regs;
3182 struct gfar_private *priv= gfargrp->priv;
3183 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184
3185 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003186 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187
3188 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003189 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05003190
3191 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003192 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05003193 (events & IEVENT_MAG))
3194 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195
3196 /* Hmm... */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003197 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
Joe Perches59deab22011-06-14 08:57:47 +00003198 netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3199 events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200
3201 /* Update the error counters */
3202 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003203 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204
3205 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003206 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003208 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 if (events & IEVENT_XFUN) {
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003210 unsigned long flags;
3211
Joe Perches59deab22011-06-14 08:57:47 +00003212 netif_dbg(priv, tx_err, dev,
3213 "TX FIFO underrun, packet dropped\n");
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003214 dev->stats.tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 priv->extra_stats.tx_underrun++;
3216
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003217 local_irq_save(flags);
3218 lock_tx_qs(priv);
3219
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 /* Reactivate the Tx Queues */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003221 gfar_write(&regs->tstat, gfargrp->tstat);
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003222
3223 unlock_tx_qs(priv);
3224 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 }
Joe Perches59deab22011-06-14 08:57:47 +00003226 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 }
3228 if (events & IEVENT_BSY) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003229 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 priv->extra_stats.rx_bsy++;
3231
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003232 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
Joe Perches59deab22011-06-14 08:57:47 +00003234 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3235 gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 }
3237 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003238 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 priv->extra_stats.rx_babr++;
3240
Joe Perches59deab22011-06-14 08:57:47 +00003241 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242 }
3243 if (events & IEVENT_EBERR) {
3244 priv->extra_stats.eberr++;
Joe Perches59deab22011-06-14 08:57:47 +00003245 netif_dbg(priv, rx_err, dev, "bus error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 }
Joe Perches59deab22011-06-14 08:57:47 +00003247 if (events & IEVENT_RXC)
3248 netif_dbg(priv, rx_status, dev, "control frame\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249
3250 if (events & IEVENT_BABT) {
3251 priv->extra_stats.tx_babt++;
Joe Perches59deab22011-06-14 08:57:47 +00003252 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 }
3254 return IRQ_HANDLED;
3255}
3256
Andy Flemingb31a1d82008-12-16 15:29:15 -08003257static struct of_device_id gfar_match[] =
3258{
3259 {
3260 .type = "network",
3261 .compatible = "gianfar",
3262 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003263 {
3264 .compatible = "fsl,etsec2",
3265 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003266 {},
3267};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003268MODULE_DEVICE_TABLE(of, gfar_match);
Andy Flemingb31a1d82008-12-16 15:29:15 -08003269
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270/* Structure for a device driver */
Grant Likely74888762011-02-22 21:05:51 -07003271static struct platform_driver gfar_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003272 .driver = {
3273 .name = "fsl-gianfar",
3274 .owner = THIS_MODULE,
3275 .pm = GFAR_PM_OPS,
3276 .of_match_table = gfar_match,
3277 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 .probe = gfar_probe,
3279 .remove = gfar_remove,
3280};
3281
Axel Lindb62f682011-11-27 16:44:17 +00003282module_platform_driver(gfar_driver);