/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx families of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or an amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

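/*
 * Illustrative sketch of the receive path described above (this is
 * not the driver's actual code -- see gfar_clean_rx_ring() -- and
 * process_frame()/refill_skb() are hypothetical stand-ins):
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY) && rx_work_limit--) {
 *		process_frame(bdp);	(hand the skb up to the stack)
 *		refill_skb(bdp);	(attach a fresh skb, mark EMPTY)
 *		if (bdp->status & RXBD_WRAP)	(last BD in the ring)
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */
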
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT	(1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
		const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

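	/*
	 * Order the bufPtr store above before the lstatus store below;
	 * once RXBD_EMPTY is set the controller owns the descriptor, so
	 * it must never observe the new status with a stale buffer
	 * pointer.
	 */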
	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

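/*
 * The single dma_alloc_coherent() region set up above is carved up
 * sequentially, so the descriptor memory ends up laid out as:
 *
 *	tx_queue[0] BDs:  tx_ring_size * sizeof(struct txbd8)
 *	   ...
 *	tx_queue[N] BDs
 *	rx_queue[0] BDs:  rx_ring_size * sizeof(struct rxbd8)
 *	   ...
 *	rx_queue[M] BDs
 *
 * with *_bd_dma_base holding the physical address of each ring for
 * the DMA engine and *_bd_base the corresponding kernel virtual one.
 */
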
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || (priv->ndev->features & NETIF_F_RXCSUM) ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
		struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

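/*
 * For reference, a minimal user-space request driving the
 * SIOCSHWTSTAMP path above might look like this (sketch only;
 * "eth0" and fd are hypothetical):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Both requests are rejected with -ERANGE unless the hardware set
 * FSL_GIANFAR_DEV_HAS_TIMER at probe time.
 */
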
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

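/*
 * Worked example for reverse_bitmap() above: with max_qs = 8 a
 * bit_map of 0x80 (MSB set, i.e. queue 0) becomes 0x01, and 0x03
 * becomes 0xC0 -- the map is mirrored so that queue 0 lands in
 * bit 0, as for_each_set_bit() expects.
 */
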
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
			(pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

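/*
 * Worked example for the decoding above (values illustrative): an
 * SVR of 0x80b00020 yields mod = 0x80b0 (the 0xfff6 mask strips the
 * "E" security-variant bit) and rev = 0x0020, i.e. rev 2.0; paired
 * with PVR 0x80850010 this selects the GFAR_ERRATA_74, _76 and
 * _A002 workarounds.
 */
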
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

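	/*
	 * The string surgery above yields interrupt names such as
	 * "eth0_g0_tx", "eth0_g0_rx" and "eth0_g0_er" in the
	 * multi-interrupt case ('i + 48' is just the ASCII digit for
	 * the group index), or the bare device name otherwise.
	 */
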
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001210 /* Initialize the filer table */
1211 gfar_init_filer_table(priv);
1212
Andy Fleming7f7f5312005-11-11 12:38:59 -06001213 /* Create all the sysfs files */
1214 gfar_init_sysfs(dev);
1215
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001217 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219 /* Even more device info helps when determining which kernel */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001220 /* provided which set of benchmarks. */
Joe Perches59deab22011-06-14 08:57:47 +00001221 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001222 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001223 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1224 i, priv->rx_queue[i]->rx_ring_size);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001225 for(i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001226 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1227 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
1229 return 0;
1230
1231register_fail:
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001232 unmap_group_regs(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001233 free_tx_pointers(priv);
1234 free_rx_pointers(priv);
Grant Likelyfe192a42009-04-25 12:53:12 +00001235 if (priv->phy_node)
1236 of_node_put(priv->phy_node);
1237 if (priv->tbi_node)
1238 of_node_put(priv->tbi_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 free_netdev(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001240 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241}
1242
Grant Likely2dc11582010-08-06 09:25:50 -06001243static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244{
Andy Flemingb31a1d82008-12-16 15:29:15 -08001245 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
Grant Likelyfe192a42009-04-25 12:53:12 +00001247 if (priv->phy_node)
1248 of_node_put(priv->phy_node);
1249 if (priv->tbi_node)
1250 of_node_put(priv->tbi_node);
1251
Andy Flemingb31a1d82008-12-16 15:29:15 -08001252 dev_set_drvdata(&ofdev->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253
David S. Millerd9d8e042009-09-06 01:41:02 -07001254 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001255 unmap_group_regs(priv);
Kumar Gala48268572009-03-18 23:28:22 -07001256 free_netdev(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257
1258 return 0;
1259}
1260
Scott Woodd87eb122008-07-11 18:04:45 -05001261#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001262
1263static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001264{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001265 struct gfar_private *priv = dev_get_drvdata(dev);
1266 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001267 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001268 unsigned long flags;
1269 u32 tempval;
1270
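/* Arm wake-on-LAN only if the user enabled it and the controller
 * supports magic packets.
 */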
1271 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001272 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001273
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001274 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001275
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001276 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001277
1278 local_irq_save(flags);
1279 lock_tx_qs(priv);
1280 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001281
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001282 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001283
1284 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001285 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001286
1287 tempval &= ~MACCFG1_TX_EN;
1288
1289 if (!magic_packet)
1290 tempval &= ~MACCFG1_RX_EN;
1291
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001292 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001293
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001294 unlock_rx_qs(priv);
1295 unlock_tx_qs(priv);
1296 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001297
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001298 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001299
1300 if (magic_packet) {
1301 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001302 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001303
1304 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001305 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001306 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001307 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001308 } else {
1309 phy_stop(priv->phydev);
1310 }
1311 }
1312
1313 return 0;
1314}
1315
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001316static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001317{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001318 struct gfar_private *priv = dev_get_drvdata(dev);
1319 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001320 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001321 unsigned long flags;
1322 u32 tempval;
1323 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001324 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001325
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001326 if (!netif_running(ndev)) {
1327 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001328 return 0;
1329 }
1330
1331 if (!magic_packet && priv->phydev)
1332 phy_start(priv->phydev);
1333
1334 /* Disable Magic Packet mode, in case something
1335 * else woke us up.
1336 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001337 local_irq_save(flags);
1338 lock_tx_qs(priv);
1339 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001340
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001341 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001342 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001343 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001344
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001345 gfar_start(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001346
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001347 unlock_rx_qs(priv);
1348 unlock_tx_qs(priv);
1349 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001350
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001351 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001352
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001353 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001354
1355 return 0;
1356}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001357
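/* Restore after hibernation: rebuild the BD rings and MAC registers from
 * scratch, since their contents need not have survived the power-down.
 */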
1358static int gfar_restore(struct device *dev)
1359{
1360 struct gfar_private *priv = dev_get_drvdata(dev);
1361 struct net_device *ndev = priv->ndev;
1362
1363 if (!netif_running(ndev))
1364 return 0;
1365
1366 gfar_init_bds(ndev);
1367 init_registers(ndev);
1368 gfar_set_mac_address(ndev);
1369 gfar_init_mac(ndev);
1370 gfar_start(ndev);
1371
1372 priv->oldlink = 0;
1373 priv->oldspeed = 0;
1374 priv->oldduplex = -1;
1375
1376 if (priv->phydev)
1377 phy_start(priv->phydev);
1378
1379 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001380 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001381
1382 return 0;
1383}
1384
1385static struct dev_pm_ops gfar_pm_ops = {
1386 .suspend = gfar_suspend,
1387 .resume = gfar_resume,
1388 .freeze = gfar_suspend,
1389 .thaw = gfar_resume,
1390 .restore = gfar_restore,
1391};
1392
1393#define GFAR_PM_OPS (&gfar_pm_ops)
1394
Scott Woodd87eb122008-07-11 18:04:45 -05001395#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001396
1397#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001398
Scott Woodd87eb122008-07-11 18:04:45 -05001399#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001401/* Reads the controller's registers to determine what interface
1402 * connects it to the PHY.
1403 */
1404static phy_interface_t gfar_get_interface(struct net_device *dev)
1405{
1406 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001407 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001408 u32 ecntrl;
1409
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001410 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001411
1412 if (ecntrl & ECNTRL_SGMII_MODE)
1413 return PHY_INTERFACE_MODE_SGMII;
1414
1415 if (ecntrl & ECNTRL_TBI_MODE) {
1416 if (ecntrl & ECNTRL_REDUCED_MODE)
1417 return PHY_INTERFACE_MODE_RTBI;
1418 else
1419 return PHY_INTERFACE_MODE_TBI;
1420 }
1421
1422 if (ecntrl & ECNTRL_REDUCED_MODE) {
1423 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1424 return PHY_INTERFACE_MODE_RMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001425 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001426 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001427
1428 /*
1429 * This isn't autodetected right now, so it must
1430 * be set by the device tree or platform code.
1431 */
1432 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1433 return PHY_INTERFACE_MODE_RGMII_ID;
1434
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001435 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001436 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001437 }
1438
Andy Flemingb31a1d82008-12-16 15:29:15 -08001439 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001440 return PHY_INTERFACE_MODE_GMII;
1441
1442 return PHY_INTERFACE_MODE_MII;
1443}
1444
1445
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001446/* Initializes driver's PHY state, and attaches to the PHY.
1447 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 */
1449static int init_phy(struct net_device *dev)
1450{
1451 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001452 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001453 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001454 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001455 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
1457 priv->oldlink = 0;
1458 priv->oldspeed = 0;
1459 priv->oldduplex = -1;
1460
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001461 interface = gfar_get_interface(dev);
1462
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001463 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1464 interface);
1465 if (!priv->phydev)
1466 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1467 interface);
1468 if (!priv->phydev) {
1469 dev_err(&dev->dev, "could not attach to PHY\n");
1470 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001471 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
Kapil Junejad3c12872007-05-11 18:25:11 -05001473 if (interface == PHY_INTERFACE_MODE_SGMII)
1474 gfar_configure_serdes(dev);
1475
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001476 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001477 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1478 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
1480 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481}
1482
Paul Gortmakerd0313582008-04-17 00:08:10 -04001483/*
1484 * Initialize TBI PHY interface for communicating with the
1485 * SERDES lynx PHY on the chip. We communicate with this PHY
1486 * through the MDIO bus on each controller, treating it as a
1487 * "normal" PHY at the address found in the TBIPA register. We assume
1488 * that the TBIPA register is valid. Either the MDIO bus code will set
1489 * it to a value that doesn't conflict with other PHYs on the bus, or the
1490 * value doesn't matter, as there are no other PHYs on the bus.
1491 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001492static void gfar_configure_serdes(struct net_device *dev)
1493{
1494 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001495 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001496
Grant Likelyfe192a42009-04-25 12:53:12 +00001497 if (!priv->tbi_node) {
1498 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1499 "device tree specify a tbi-handle\n");
1500 return;
1501 }
1502
1503 tbiphy = of_phy_find_device(priv->tbi_node);
1504 if (!tbiphy) {
1505 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001506 return;
1507 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001508
Andy Flemingb31a1d82008-12-16 15:29:15 -08001509 /*
1510 * If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001511 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1512 * everything for us? Resetting it takes the link down and requires
1513 * several seconds for it to come back.
1514 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001515 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001516 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001517
Paul Gortmakerd0313582008-04-17 00:08:10 -04001518 /* Single clock mode, MII mode off (for SerDes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001519 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001520
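/* Advertise 1000BASE-X full duplex plus symmetric and asymmetric pause */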
Grant Likelyfe192a42009-04-25 12:53:12 +00001521 phy_write(tbiphy, MII_ADVERTISE,
Kapil Junejad3c12872007-05-11 18:25:11 -05001522 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1523 ADVERTISE_1000XPSE_ASYM);
1524
Grant Likelyfe192a42009-04-25 12:53:12 +00001525 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
Kapil Junejad3c12872007-05-11 18:25:11 -05001526 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1527}
1528
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529static void init_registers(struct net_device *dev)
1530{
1531 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001532 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001533 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001535 for (i = 0; i < priv->num_grps; i++) {
1536 regs = priv->gfargrp[i].regs;
1537 /* Clear IEVENT */
1538 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001540 /* Initialize IMASK */
1541 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001544 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001546 gfar_write(&regs->igaddr0, 0);
1547 gfar_write(&regs->igaddr1, 0);
1548 gfar_write(&regs->igaddr2, 0);
1549 gfar_write(&regs->igaddr3, 0);
1550 gfar_write(&regs->igaddr4, 0);
1551 gfar_write(&regs->igaddr5, 0);
1552 gfar_write(&regs->igaddr6, 0);
1553 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001555 gfar_write(&regs->gaddr0, 0);
1556 gfar_write(&regs->gaddr1, 0);
1557 gfar_write(&regs->gaddr2, 0);
1558 gfar_write(&regs->gaddr3, 0);
1559 gfar_write(&regs->gaddr4, 0);
1560 gfar_write(&regs->gaddr5, 0);
1561 gfar_write(&regs->gaddr6, 0);
1562 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001565 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001566 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
1568 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001569 gfar_write(&regs->rmon.cam1, 0xffffffff);
1570 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 }
1572
1573 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001574 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001577 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578}
1579
Anton Vorontsov511d9342010-06-30 06:39:15 +00001580static int __gfar_is_rx_idle(struct gfar_private *priv)
1581{
1582 u32 res;
1583
1584 /*
1585 * Normally TSEC should not hang on GRS commands, so we should
1586 * actually wait for IEVENT_GRSC flag.
1587 */
1588 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1589 return 0;
1590
1591 /*
1592 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1593 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1594 * and the Rx can be safely reset.
1595 */
1596 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1597 res &= 0x7f807f80;
1598 if ((res & 0xffff) == (res >> 16))
1599 return 1;
1600
1601 return 0;
1602}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001603
1604/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001605static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606{
1607 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001608 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001610 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001612 for (i = 0; i < priv->num_grps; i++) {
1613 regs = priv->gfargrp[i].regs;
1614 /* Mask all interrupts */
1615 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001617 /* Clear all interrupts */
1618 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1619 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001621 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001623 tempval = gfar_read(&regs->dmactrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1625 != (DMACTRL_GRS | DMACTRL_GTS)) {
Anton Vorontsov511d9342010-06-30 06:39:15 +00001626 int ret;
1627
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001629 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Anton Vorontsov511d9342010-06-30 06:39:15 +00001631 do {
1632 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1633 (IEVENT_GRSC | IEVENT_GTSC)) ==
1634 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1635 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1636 ret = __gfar_is_rx_idle(priv);
1637 } while (!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 }
Scott Woodd87eb122008-07-11 18:04:45 -05001639}
Scott Woodd87eb122008-07-11 18:04:45 -05001640
1641/* Halt the receive and transmit queues */
1642void gfar_halt(struct net_device *dev)
1643{
1644 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001645 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001646 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647
Scott Wood2a54adc2008-08-12 15:10:46 -05001648 gfar_halt_nodisable(dev);
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 /* Disable Rx and Tx */
1651 tempval = gfar_read(&regs->maccfg1);
1652 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1653 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001654}
1655
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001656static void free_grp_irqs(struct gfar_priv_grp *grp)
1657{
1658 free_irq(grp->interruptError, grp);
1659 free_irq(grp->interruptTransmit, grp);
1660 free_irq(grp->interruptReceive, grp);
1661}
1662
Kumar Gala0bbaf062005-06-20 10:54:21 -05001663void stop_gfar(struct net_device *dev)
1664{
1665 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001666 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001667 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001668
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001669 phy_stop(priv->phydev);
1670
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001671
Kumar Gala0bbaf062005-06-20 10:54:21 -05001672 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001673 local_irq_save(flags);
1674 lock_tx_qs(priv);
1675 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001676
Kumar Gala0bbaf062005-06-20 10:54:21 -05001677 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001679 unlock_rx_qs(priv);
1680 unlock_tx_qs(priv);
1681 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
1683 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001684 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001685 for (i = 0; i < priv->num_grps; i++)
1686 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001688 for (i = 0; i < priv->num_grps; i++)
1689 free_irq(priv->gfargrp[i].interruptTransmit,
1690 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 }
1692
1693 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694}
1695
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001696static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001699 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001700 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001702 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001704 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1705 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001706 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
Kumar Gala48268572009-03-18 23:28:22 -07001708 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001709 txbdp->length, DMA_TO_DEVICE);
1710 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001711 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1712 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001713 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001714 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001715 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001717 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001718 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1719 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001721 kfree(tx_queue->tx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001722}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001724static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1725{
1726 struct rxbd8 *rxbdp;
1727 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1728 int i;
1729
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001730 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001732 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1733 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001734 dma_unmap_single(&priv->ofdev->dev,
1735 rxbdp->bufPtr, priv->rx_buffer_size,
Anton Vorontsove69edd22009-10-12 06:00:30 +00001736 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001737 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1738 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001740 rxbdp->lstatus = 0;
1741 rxbdp->bufPtr = 0;
1742 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001744 kfree(rx_queue->rx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001745}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001746
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001747/* If there are any tx skbs or rx skbs still around, free them.
1748 * Then free tx_skbuff and rx_skbuff */
1749static void free_skb_resources(struct gfar_private *priv)
1750{
1751 struct gfar_priv_tx_q *tx_queue = NULL;
1752 struct gfar_priv_rx_q *rx_queue = NULL;
1753 int i;
1754
1755 /* Go through all the buffer descriptors and free their data buffers */
1756 for (i = 0; i < priv->num_tx_queues; i++) {
1757 tx_queue = priv->tx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001758 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001759 free_skb_tx_queue(tx_queue);
1760 }
1761
1762 for (i = 0; i < priv->num_rx_queues; i++) {
1763 rx_queue = priv->rx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001764 if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001765 free_skb_rx_queue(rx_queue);
1766 }
1767
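/* The tx and rx BD rings share a single coherent allocation, anchored
 * at the first tx queue's BD base.
 */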
1768 dma_free_coherent(&priv->ofdev->dev,
1769 sizeof(struct txbd8) * priv->total_tx_ring_size +
1770 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1771 priv->tx_queue[0]->tx_bd_base,
1772 priv->tx_queue[0]->tx_bd_dma_base);
Sebastian Andrzej Siewior7df9c432010-05-04 22:30:47 +00001773 skb_queue_purge(&priv->rx_recycle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774}
1775
Kumar Gala0bbaf062005-06-20 10:54:21 -05001776void gfar_start(struct net_device *dev)
1777{
1778 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001779 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001780 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001781 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001782
1783 /* Enable Rx and Tx in MACCFG1 */
1784 tempval = gfar_read(&regs->maccfg1);
1785 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1786 gfar_write(&regs->maccfg1, tempval);
1787
1788 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001789 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001790 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001791 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001792
Kumar Gala0bbaf062005-06-20 10:54:21 -05001793 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001794 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001795 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001796 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001797
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001798 for (i = 0; i < priv->num_grps; i++) {
1799 regs = priv->gfargrp[i].regs;
1800 /* Clear THLT/RHLT, so that the DMA starts polling now */
1801 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1802 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1803 /* Unmask the interrupts we look for */
1804 gfar_write(&regs->imask, IMASK_DEFAULT);
1805 }
Dai Haruki12dea572008-12-16 15:30:20 -08001806
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001807 dev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001808}
1809
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001810void gfar_configure_coalescing(struct gfar_private *priv,
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001811 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001813 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001814 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001815 int i = 0;
1816
1817 /* Backward compatible case ---- even if we enable
1818 * multiple queues, there's only single reg to program
1819 */
1820 gfar_write(&regs->txic, 0);
1821 if (likely(priv->tx_queue[0]->txcoalescing))
1822 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1823
1824 gfar_write(&regs->rxic, 0);
1825 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1826 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1827
1828 if (priv->mode == MQ_MG_MODE) {
1829 baddr = &regs->txic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001830 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001831 if (likely(priv->tx_queue[i]->txcoalescing)) {
1832 gfar_write(baddr + i, 0);
1833 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1834 }
1835 }
1836
1837 baddr = &regs->rxic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001838 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001839 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1840 gfar_write(baddr + i, 0);
1841 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1842 }
1843 }
1844 }
1845}
1846
1847static int register_grp_irqs(struct gfar_priv_grp *grp)
1848{
1849 struct gfar_private *priv = grp->priv;
1850 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001851 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 /* If the device has multiple interrupts, register for
1854 * them. Otherwise, only register for the one */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001855 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001856 /* Install our interrupt handlers for Error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 * Transmit, and Receive */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001858 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1859 grp->int_name_er,grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001860 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1861 grp->interruptError);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001862
Julia Lawall2145f1a2010-08-05 10:26:20 +00001863 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 }
1865
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001866 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1867 0, grp->int_name_tx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001868 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1869 grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 goto tx_irq_fail;
1871 }
1872
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001873 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1874 grp->int_name_rx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001875 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1876 grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 goto rx_irq_fail;
1878 }
1879 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001880 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1881 grp->int_name_tx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001882 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1883 grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 goto err_irq_fail;
1885 }
1886 }
1887
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001888 return 0;
1889
1890rx_irq_fail:
1891 free_irq(grp->interruptTransmit, grp);
1892tx_irq_fail:
1893 free_irq(grp->interruptError, grp);
1894err_irq_fail:
1895 return err;
1896
1897}
1898
1899/* Bring the controller up and running */
1900int startup_gfar(struct net_device *ndev)
1901{
1902 struct gfar_private *priv = netdev_priv(ndev);
1903 struct gfar __iomem *regs = NULL;
1904 int err, i, j;
1905
1906 for (i = 0; i < priv->num_grps; i++) {
1907 regs = priv->gfargrp[i].regs;
1908 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1909 }
1910
1911 regs = priv->gfargrp[0].regs;
1912 err = gfar_alloc_skb_resources(ndev);
1913 if (err)
1914 return err;
1915
1916 gfar_init_mac(ndev);
1917
1918 for (i = 0; i < priv->num_grps; i++) {
1919 err = register_grp_irqs(&priv->gfargrp[i]);
1920 if (err) {
1921 for (j = 0; j < i; j++)
1922 free_grp_irqs(&priv->gfargrp[j]);
Anton Vorontsovff760152011-01-18 02:36:02 +00001923 goto irq_fail;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001924 }
1925 }
1926
Andy Fleming7f7f5312005-11-11 12:38:59 -06001927 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001928 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001930 phy_start(priv->phydev);
1931
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001932 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 return 0;
1935
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001936irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001937 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 return err;
1939}
1940
1941/* Called when something needs to use the ethernet device */
1942/* Returns 0 for success. */
1943static int gfar_enet_open(struct net_device *dev)
1944{
Li Yang94e8cc32007-10-12 21:53:51 +08001945 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 int err;
1947
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001948 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001949
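/* rx_recycle caches freed rx skbs so the receive path can reuse them
 * instead of allocating fresh ones.
 */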
Andy Fleming0fd56bb2009-02-04 16:43:16 -08001950 skb_queue_head_init(&priv->rx_recycle);
1951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 /* Initialize a bunch of registers */
1953 init_registers(dev);
1954
1955 gfar_set_mac_address(dev);
1956
1957 err = init_phy(dev);
1958
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001959 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001960 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001965 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001966 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001967 return err;
1968 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001970 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001972 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 return err;
1975}
1976
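/* Push a frame control block onto the skb's headroom and zero it */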
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001977static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001978{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001979 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001980
1981 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001982
Kumar Gala0bbaf062005-06-20 10:54:21 -05001983 return fcb;
1984}
1985
1986static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1987{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001988 u8 flags = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001989
1990 /* If we're here, it's an IP packet with a TCP or UDP
1991 * payload. We set it to checksum, using a pseudo-header
1992 * we provide
1993 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001994 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001995
Andy Fleming7f7f5312005-11-11 12:38:59 -06001996 /* Tell the controller what the protocol is */
1997 /* And provide the already calculated phcs */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001998 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001999 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03002000 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002001 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05002002 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002003
2004 /* l3os is the distance between the start of the
2005 * frame (skb->data) and the start of the IP hdr.
2006 * l4os is the distance between the start of the
2007 * l3 hdr and the l4 hdr */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002008 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002009 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002010
Andy Fleming7f7f5312005-11-11 12:38:59 -06002011 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002012}
2013
Andy Fleming7f7f5312005-11-11 12:38:59 -06002014 inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002015{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002016 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002017 fcb->vlctl = vlan_tx_tag_get(skb);
2018}
2019
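/* Advance a TxBD pointer by "stride" descriptors, wrapping back to the
 * start of the ring when it runs past the end.
 */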
Dai Haruki4669bc92008-12-17 16:51:04 -08002020static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2021 struct txbd8 *base, int ring_size)
2022{
2023 struct txbd8 *new_bd = bdp + stride;
2024
2025 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2026}
2027
2028static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2029 int ring_size)
2030{
2031 return skip_txbd(bdp, 1, base, ring_size);
2032}
2033
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034/* This is called by the kernel when a frame is ready for transmission. */
2035/* It is pointed to by the dev->hard_start_xmit function pointer */
2036static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2037{
2038 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002039 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002040 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002041 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002042 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002043 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002044 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002045 int i, rq = 0, do_tstamp = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002046 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05002047 unsigned long flags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002048 unsigned int nr_frags, nr_txbds, length;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002049
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00002050 /*
2051 * TOE=1 frames larger than 2500 bytes may see excess delays
2052 * before start of transmission.
2053 */
2054 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2055 skb->ip_summed == CHECKSUM_PARTIAL &&
2056 skb->len > 2500)) {
2057 int ret;
2058
2059 ret = skb_checksum_help(skb);
2060 if (ret)
2061 return ret;
2062 }
2063
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002064 rq = skb->queue_mapping;
2065 tx_queue = priv->tx_queue[rq];
2066 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002067 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002068 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002069
2070 /* check if time stamp should be generated */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002071 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2072 priv->hwts_tx_en))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002073 do_tstamp = 1;
Dai Haruki4669bc92008-12-17 16:51:04 -08002074
Li Yang5b28bea2009-03-27 15:54:30 -07002075 /* make space for additional header when fcb is needed */
2076 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
Jesse Grosseab6d182010-10-20 13:56:03 +00002077 vlan_tx_tag_present(skb) ||
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002078 unlikely(do_tstamp)) &&
Li Yang5b28bea2009-03-27 15:54:30 -07002079 (skb_headroom(skb) < GMAC_FCB_LEN)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002080 struct sk_buff *skb_new;
2081
2082 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
2083 if (!skb_new) {
2084 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07002085 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002086 return NETDEV_TX_OK;
2087 }
2088 kfree_skb(skb);
2089 skb = skb_new;
2090 }
2091
Dai Haruki4669bc92008-12-17 16:51:04 -08002092 /* total number of fragments in the SKB */
2093 nr_frags = skb_shinfo(skb)->nr_frags;
2094
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002095 /* calculate the required number of TxBDs for this skb */
2096 if (unlikely(do_tstamp))
2097 nr_txbds = nr_frags + 2;
2098 else
2099 nr_txbds = nr_frags + 1;
2100
Dai Haruki4669bc92008-12-17 16:51:04 -08002101 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002102 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002103 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002104 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002105 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002106 return NETDEV_TX_BUSY;
2107 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109 /* Update transmit stats */
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002110 tx_queue->stats.tx_bytes += skb->len;
2111 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002113 txbdp = txbdp_start = tx_queue->cur_tx;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002114 lstatus = txbdp->lstatus;
2115
2116 /* Time stamp insertion requires one additional TxBD */
2117 if (unlikely(do_tstamp))
2118 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2119 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
Dai Haruki4669bc92008-12-17 16:51:04 -08002121 if (nr_frags == 0) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002122 if (unlikely(do_tstamp))
2123 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2124 TXBD_INTERRUPT);
2125 else
2126 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Dai Haruki4669bc92008-12-17 16:51:04 -08002127 } else {
2128 /* Place the fragment addresses and lengths into the TxBDs */
2129 for (i = 0; i < nr_frags; i++) {
2130 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002131 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Dai Haruki4669bc92008-12-17 16:51:04 -08002133 length = skb_shinfo(skb)->frags[i].size;
2134
2135 lstatus = txbdp->lstatus | length |
2136 BD_LFLAG(TXBD_READY);
2137
2138 /* Handle the last BD specially */
2139 if (i == nr_frags - 1)
2140 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2141
Kumar Gala48268572009-03-18 23:28:22 -07002142 bufaddr = dma_map_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002143 skb_shinfo(skb)->frags[i].page,
2144 skb_shinfo(skb)->frags[i].page_offset,
2145 length,
2146 DMA_TO_DEVICE);
2147
2148 /* set the TxBD length and buffer pointer */
2149 txbdp->bufPtr = bufaddr;
2150 txbdp->lstatus = lstatus;
2151 }
2152
2153 lstatus = txbdp_start->lstatus;
2154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
Kumar Gala0bbaf062005-06-20 10:54:21 -05002156 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08002157 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002158 fcb = gfar_add_fcb(skb);
Alex Dubov4363c2f2011-03-16 17:57:13 +00002159 /* Per the errata, fall back to software checksumming when the
2160 * FCB would land in the last bytes of a 32-byte block. */
2160 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2161 && ((unsigned long)fcb % 0x20) > 0x18)) {
2162 __skb_pull(skb, GMAC_FCB_LEN);
2163 skb_checksum_help(skb);
2164 } else {
2165 lstatus |= BD_LFLAG(TXBD_TOE);
2166 gfar_tx_checksum(skb, fcb);
2167 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002168 }
2169
Jesse Grosseab6d182010-10-20 13:56:03 +00002170 if (vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002171 if (unlikely(NULL == fcb)) {
2172 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08002173 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002174 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002175
2176 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002177 }
2178
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002179 /* Setup tx hardware time stamping if requested */
2180 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002181 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002182 if (fcb == NULL)
2183 fcb = gfar_add_fcb(skb);
2184 fcb->ptp = 1;
2185 lstatus |= BD_LFLAG(TXBD_TOE);
2186 }
2187
Kumar Gala48268572009-03-18 23:28:22 -07002188 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Dai Haruki4669bc92008-12-17 16:51:04 -08002189 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002191 /*
2192 * If time stamping is requested, one additional TxBD must be set up. The
2193 * first TxBD points to the FCB and must have a data length of
2194 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2195 * the full frame length.
2196 */
2197 if (unlikely(do_tstamp)) {
2198 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2199 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2200 (skb_headlen(skb) - GMAC_FCB_LEN);
2201 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2202 } else {
2203 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Dai Haruki4669bc92008-12-17 16:51:04 -08002206 /*
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002207 * We can work in parallel with gfar_clean_tx_ring(), except
2208 * when modifying num_txbdfree. Note that we didn't grab the lock
2209 * when we were reading the num_txbdfree and checking for available
2210 * space, that's because outside of this function it can only grow,
2211 * and once we've got needed space, it cannot suddenly disappear.
2212 *
2213 * The lock also protects us from gfar_error(), which can modify
2214 * regs->tstat and thus retrigger the transfers, which is why we
2215 * also must grab the lock before setting ready bit for the first
2216 * to be transmitted BD.
2217 */
2218 spin_lock_irqsave(&tx_queue->txlock, flags);
2219
2220 /*
Dai Haruki4669bc92008-12-17 16:51:04 -08002221 * The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002222 * semantics (it requires synchronization between cacheable and
2223 * uncacheable mappings, which eieio doesn't provide and which we
2224 * don't need), thus requiring a more expensive sync instruction. At
2225 * some point, the set of architecture-independent barrier functions
2226 * should be expanded to include weaker barriers.
2227 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002228 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002229
Dai Haruki4669bc92008-12-17 16:51:04 -08002230 txbdp_start->lstatus = lstatus;
2231
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002232 eieio(); /* force lstatus write before tx_skbuff */
2233
2234 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2235
Dai Haruki4669bc92008-12-17 16:51:04 -08002236 /* Update the current skb pointer to the next entry we will use
2237 * (wrapping if necessary) */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002238 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2239 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002240
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002241 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002242
2243 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002244 tx_queue->num_txbdfree -= (nr_txbds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
2246 /* If the next BD still needs to be cleaned up, then the BDs
2247 * are full. We need to tell the kernel to stop sending us stuff. */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002248 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002249 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002251 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 }
2253
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002255 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
2257 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002258 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002260 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261}
2262
2263/* Stops the kernel queue, and halts the controller */
2264static int gfar_close(struct net_device *dev)
2265{
2266 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002267
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002268 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002269
Sebastian Siewiorab939902008-08-19 21:12:45 +02002270 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 stop_gfar(dev);
2272
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002273 /* Disconnect from the PHY */
2274 phy_disconnect(priv->phydev);
2275 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002277 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
2279 return 0;
2280}
2281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002283static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002285 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
2287 return 0;
2288}
2289
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002290/* Check if rx parser should be activated */
2291void gfar_check_rx_parser_mode(struct gfar_private *priv)
2292{
2293 struct gfar __iomem *regs;
2294 u32 tempval;
2295
2296 regs = priv->gfargrp[0].regs;
2297
2298 tempval = gfar_read(&regs->rctrl);
2299 /* If parse is no longer required, then disable parser */
2300 if (tempval & RCTRL_REQ_PARSER)
2301 tempval |= RCTRL_PRSDEP_INIT;
2302 else
2303 tempval &= ~RCTRL_PRSDEP_INIT;
2304 gfar_write(&regs->rctrl, tempval);
2305}
2306
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Kumar Gala0bbaf062005-06-20 10:54:21 -05002308/* Enables and disables VLAN insertion/extraction */
2309static void gfar_vlan_rx_register(struct net_device *dev,
2310 struct vlan_group *grp)
2311{
2312 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002313 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002314 unsigned long flags;
2315 u32 tempval;
2316
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002317 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002318 local_irq_save(flags);
2319 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002320
Anton Vorontsovcd1f55a2009-01-26 14:33:23 -08002321 priv->vlgrp = grp;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002322
2323 if (grp) {
2324 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002325 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002326 tempval |= TCTRL_VLINS;
2327
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002328 gfar_write(&regs->tctrl, tempval);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002329
Kumar Gala0bbaf062005-06-20 10:54:21 -05002330 /* Enable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002331 tempval = gfar_read(&regs->rctrl);
Dai Haruki77ecaf22008-12-16 15:30:48 -08002332 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002333 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002334 } else {
2335 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002336 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002337 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002338 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002339
2340 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002341 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002342 tempval &= ~RCTRL_VLEX;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002343 gfar_write(&regs->rctrl, tempval);
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002344
2345 gfar_check_rx_parser_mode(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002346 }
2347
Dai Haruki77ecaf22008-12-16 15:30:48 -08002348 gfar_change_mtu(dev, dev->mtu);
2349
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002350 unlock_rx_qs(priv);
2351 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002352}
2353
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2355{
2356 int tempsize, tempval;
2357 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002358 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002360 int frame_size = new_mtu + ETH_HLEN;
2361
Dai Haruki77ecaf22008-12-16 15:30:48 -08002362 if (priv->vlgrp)
Dai Harukifaa89572008-03-24 10:53:26 -05002363 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002364
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Joe Perches59deab22011-06-14 08:57:47 +00002366 netif_err(priv, drv, dev, "Invalid MTU setting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 return -EINVAL;
2368 }
2369
Dai Haruki77ecaf22008-12-16 15:30:48 -08002370 if (gfar_uses_fcb(priv))
2371 frame_size += GMAC_FCB_LEN;
2372
2373 frame_size += priv->padding;
2374
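/* Round the required size up to the next multiple of
 * INCREMENTAL_BUFFER_SIZE.
 */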
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 tempsize =
2376 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2377 INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
			gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
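
/*
 * A short worked example of the alignment math above, assuming
 * RXBUF_ALIGNMENT is 64 (its value in gianfar.h): with skb->data at
 * ...0x1036, (0x1036 & 63) = 0x36, so 64 - 0x36 = 10 bytes are
 * reserved and data lands at ...0x1040.  If data is already aligned,
 * a full 64 bytes are still reserved, which is why gfar_alloc_skb()
 * below allocates rx_buffer_size + RXBUF_ALIGNMENT.
 */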

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	u32 lstatus;
	size_t buflen;

	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;
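		/*
		 * For instance, an skb with two page fragments normally
		 * spans three BDs (one for the linear head plus one per
		 * fragment); with TX timestamping in progress a fourth
		 * BD covering the TxPAL is freed as well.
		 */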

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	return howmany;
}

static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}
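
/*
 * A note on the pattern above (explanatory only): gfar_schedule_cleanup()
 * masks further RX/TX interrupts (IMASK_RTX_DISABLED) when it successfully
 * schedules NAPI; gfar_poll() restores IMASK_DEFAULT once napi_complete()
 * has run.  If NAPI was already scheduled, only IEVENT is cleared so the
 * already-queued work doesn't re-raise the interrupt in the meantime.
 */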

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}
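
/*
 * Allocation fast path, summarized: skbs released by gfar_clean_tx_ring()
 * that pass skb_recycle_check() are parked on priv->rx_recycle, so
 * gfar_new_skb() can usually hand back a recycled, pre-aligned buffer
 * instead of calling netdev_alloc_skb().  Only when the recycle list is
 * empty does a fresh allocation (plus re-alignment) take place.
 */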

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}


/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* The FCB sits at the beginning of the buffer, if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
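
/*
 * Receive buffer layout as handled above (a summary; the 8-byte sizes
 * are assumptions taken from GMAC_FCB_LEN and the driver's timestamp
 * padding): [ RX FCB | hardware timestamp, when hwts_rx_en | padding |
 * Ethernet frame ].  amount_pull strips the FCB, the timestamp is read
 * in place from the head of the remaining data, and the priv->padding
 * pull exposes the Ethernet header before eth_type_trans().
 */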

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}
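
	/*
	 * Budget redistribution in the loop above, by example (numbers
	 * for illustration only): with budget = 64 and two RX queues,
	 * each gets 32 on the first pass.  If queue 0 only had 10 frames,
	 * its unused 22 become left_over_budget and are offered to the
	 * still-unserviced queue on the next trip through the while loop,
	 * so a busy queue can end up consuming the whole NAPI budget.
	 */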

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (i.e. the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits of the
 *    hash index indicate which gaddr register to use, and the 5
 *    other bits indicate which bit (assuming an IBM numbering scheme,
 *    which for PowerPC (tm) is usually the case) in the register holds
 *    the entry. */
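/* Worked example (values chosen for illustration): suppose the
 * bit-reversed CRC of the address is 0xD5000000 and hash_width is 8.
 * Then whichreg = 0xD5000000 >> 29 = 6 and whichbit =
 * (0xD5000000 >> 24) & 0x1f = 21, so IBM-numbered bit 21 of gaddr6,
 * that is, (1 << (31 - 21)) in conventional LSB-0 terms, is set
 * below. */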
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the num'th pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, because
	 * little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
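
/* Example of the byte swap (illustrative; assumes the big-endian CPUs
 * these controllers sit on): for addr = 00:04:9f:01:02:03, tmpbuf holds
 * 03 02 01 9f 04 00, so macstnaddr1 receives 0x0302019f and the top
 * two bytes of the second register carry 04 00.  Reading the registers
 * back byte-reversed recovers the original address. */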

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

static int __init gfar_init(void)
{
	return platform_driver_register(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);