Kumar Gala0bbaf062005-06-20 10:54:21 -05001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * drivers/net/gianfar.c
3 *
4 * Gianfar Ethernet Driver
Andy Fleming7f7f5312005-11-11 12:38:59 -06005 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
Kumar Gala4c8d3d92005-11-13 16:06:30 -080010 * Maintainer: Kumar Gala
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +000011 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012 *
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +000013 * Copyright 2002-2009 Freescale Semiconductor, Inc.
14 * Copyright 2007 MontaVista Software, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 *
16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 *
21 * Gianfar: AKA Lambda Draconis, "Dragon"
22 * RA 11 31 24.2
23 * Dec +69 19 52
24 * V 3.84
25 * B-V +1.62
26 *
27 * Theory of operation
Kumar Gala0bbaf062005-06-20 10:54:21 -050028 *
Andy Flemingb31a1d82008-12-16 15:29:15 -080029 * The driver is initialized through of_device. Configuration information
30 * is therefore conveyed through an OF-style device tree.
Linus Torvalds1da177e2005-04-16 15:20:36 -070031 *
32 * The Gianfar Ethernet Controller uses a ring of buffer
33 * descriptors. The beginning is indicated by a register
Kumar Gala0bbaf062005-06-20 10:54:21 -050034 * pointing to the physical address of the start of the ring.
35 * The end is determined by a "wrap" bit being set in the
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 * last descriptor of the ring.
37 *
38 * When a packet is received, the RXF bit in the
Kumar Gala0bbaf062005-06-20 10:54:21 -050039 * IEVENT register is set, triggering an interrupt when the
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 * corresponding bit in the IMASK register is also set (if
41 * interrupt coalescing is active, then the interrupt may not
42 * happen immediately, but will wait until either a set number
Andy Flemingbb40dcb2005-09-23 22:54:21 -040043 * of frames has been received or an amount of time has passed). In NAPI, the
Linus Torvalds1da177e2005-04-16 15:20:36 -070044 * interrupt handler will signal there is work to be done, and
Francois Romieu0aa15382008-07-11 00:33:52 +020045 * exit. This method will start at the last known empty
Kumar Gala0bbaf062005-06-20 10:54:21 -050046 * descriptor, and process every subsequent descriptor until there
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 * are none left with data (NAPI will stop after a set number of
48 * packets to give time to other tasks, but will eventually
49 * process all the packets). The data arrives inside a
50 * pre-allocated skb, and so after the skb is passed up to the
51 * stack, a new skb must be allocated, and the address field in
52 * the buffer descriptor must be updated to indicate this new
53 * skb.
54 *
55 * When the kernel requests that a packet be transmitted, the
56 * driver starts where it left off last time, and points the
57 * descriptor at the buffer which was passed in. The driver
58 * then informs the DMA engine that there are packets ready to
59 * be transmitted. Once the controller is finished transmitting
60 * the packet, an interrupt may be triggered (under the same
61 * conditions as for reception, but depending on the TXF bit).
62 * The driver then cleans up the buffer.
63 */
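
/*
 * Illustrative sketch only -- not part of the driver.  A simplified RX
 * walk under the scheme described above, with hypothetical helpers
 * hand_skb_to_stack() and attach_new_skb(); the real work is done in
 * gfar_clean_rx_ring() below.
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		hand_skb_to_stack(rx_queue, bdp);	// pass skb up
 *		attach_new_skb(rx_queue, bdp);		// refill descriptor
 *
 *		if (bdp->status & RXBD_WRAP)		// wrap bit set in the
 *			bdp = rx_queue->rx_bd_base;	// last descriptor
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */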
64
Linus Torvalds1da177e2005-04-16 15:20:36 -070065#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#include <linux/string.h>
67#include <linux/errno.h>
Andy Flemingbb40dcb2005-09-23 22:54:21 -040068#include <linux/unistd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070069#include <linux/slab.h>
70#include <linux/interrupt.h>
71#include <linux/init.h>
72#include <linux/delay.h>
73#include <linux/netdevice.h>
74#include <linux/etherdevice.h>
75#include <linux/skbuff.h>
Kumar Gala0bbaf062005-06-20 10:54:21 -050076#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070077#include <linux/spinlock.h>
78#include <linux/mm.h>
Grant Likelyfe192a42009-04-25 12:53:12 +000079#include <linux/of_mdio.h>
Andy Flemingb31a1d82008-12-16 15:29:15 -080080#include <linux/of_platform.h>
Kumar Gala0bbaf062005-06-20 10:54:21 -050081#include <linux/ip.h>
82#include <linux/tcp.h>
83#include <linux/udp.h>
Kumar Gala9c07b8842006-01-11 11:26:25 -080084#include <linux/in.h>
Manfred Rudigiercc772ab2010-04-08 23:10:03 +000085#include <linux/net_tstamp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
87#include <asm/io.h>
Anton Vorontsov7d350972010-06-30 06:39:12 +000088#include <asm/reg.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070089#include <asm/irq.h>
90#include <asm/uaccess.h>
91#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070092#include <linux/dma-mapping.h>
93#include <linux/crc32.h>
Andy Flemingbb40dcb2005-09-23 22:54:21 -040094#include <linux/mii.h>
95#include <linux/phy.h>
Andy Flemingb31a1d82008-12-16 15:29:15 -080096#include <linux/phy_fixed.h>
97#include <linux/of.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070098
99#include "gianfar.h"
Andy Fleming1577ece2009-02-04 16:42:12 -0800100#include "fsl_pq_mdio.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101
102#define TX_TIMEOUT (1*HZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103#undef BRIEF_GFAR_ERRORS
104#undef VERBOSE_GFAR_ERRORS
105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106const char gfar_driver_name[] = "Gianfar Ethernet";
Andy Fleming7f7f5312005-11-11 12:38:59 -0600107const char gfar_driver_version[] = "1.3";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109static int gfar_enet_open(struct net_device *dev);
110static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
Sebastian Siewiorab939902008-08-19 21:12:45 +0200111static void gfar_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112static void gfar_timeout(struct net_device *dev);
113static int gfar_close(struct net_device *dev);
Andy Fleming815b97c2008-04-22 17:18:29 -0500114struct sk_buff *gfar_new_skb(struct net_device *dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000115static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Andy Fleming815b97c2008-04-22 17:18:29 -0500116 struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117static int gfar_set_mac_address(struct net_device *dev);
118static int gfar_change_mtu(struct net_device *dev, int new_mtu);
David Howells7d12e782006-10-05 14:55:46 +0100119static irqreturn_t gfar_error(int irq, void *dev_id);
120static irqreturn_t gfar_transmit(int irq, void *dev_id);
121static irqreturn_t gfar_interrupt(int irq, void *dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122static void adjust_link(struct net_device *dev);
123static void init_registers(struct net_device *dev);
124static int init_phy(struct net_device *dev);
Grant Likely2dc11582010-08-06 09:25:50 -0600125static int gfar_probe(struct platform_device *ofdev,
Andy Flemingb31a1d82008-12-16 15:29:15 -0800126 const struct of_device_id *match);
Grant Likely2dc11582010-08-06 09:25:50 -0600127static int gfar_remove(struct platform_device *ofdev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400128static void free_skb_resources(struct gfar_private *priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129static void gfar_set_multi(struct net_device *dev);
130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
Kapil Junejad3c12872007-05-11 18:25:11 -0500131static void gfar_configure_serdes(struct net_device *dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700132static int gfar_poll(struct napi_struct *napi, int budget);
Vitaly Woolf2d71c22006-11-07 13:27:02 +0300133#ifdef CONFIG_NET_POLL_CONTROLLER
134static void gfar_netpoll(struct net_device *dev);
135#endif
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
Dai Haruki2c2db482008-12-16 15:31:15 -0800138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
139 int amount_pull);
Kumar Gala0bbaf062005-06-20 10:54:21 -0500140static void gfar_vlan_rx_register(struct net_device *netdev,
141 struct vlan_group *grp);
Andy Fleming7f7f5312005-11-11 12:38:59 -0600142void gfar_halt(struct net_device *dev);
Scott Woodd87eb122008-07-11 18:04:45 -0500143static void gfar_halt_nodisable(struct net_device *dev);
Andy Fleming7f7f5312005-11-11 12:38:59 -0600144void gfar_start(struct net_device *dev);
145static void gfar_clear_exact_match(struct net_device *dev);
146static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
Andy Fleming26ccfc32009-03-10 12:58:28 +0000147static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149MODULE_AUTHOR("Freescale Semiconductor, Inc");
150MODULE_DESCRIPTION("Gianfar Ethernet Driver");
151MODULE_LICENSE("GPL");
152
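/* Point one RX descriptor at its DMA buffer and hand it back to the
 * controller: mark it empty (and interrupting), set the wrap bit on the
 * last descriptor of the ring, and use eieio() so the buffer pointer is
 * visible before the status update. */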
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000153static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000154 dma_addr_t buf)
155{
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000156 u32 lstatus;
157
158 bdp->bufPtr = buf;
159
160 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000161 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000162 lstatus |= BD_LFLAG(RXBD_WRAP);
163
164 eieio();
165
166 bdp->lstatus = lstatus;
167}
168
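/* (Re)initialize every TX and RX descriptor ring: reset the TX bookkeeping
 * and clear its descriptors, then make sure each RX descriptor has an skb
 * attached, allocating fresh ones where needed. */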
Anton Vorontsov87283272009-10-12 06:00:39 +0000169static int gfar_init_bds(struct net_device *ndev)
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000170{
Anton Vorontsov87283272009-10-12 06:00:39 +0000171 struct gfar_private *priv = netdev_priv(ndev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000172 struct gfar_priv_tx_q *tx_queue = NULL;
173 struct gfar_priv_rx_q *rx_queue = NULL;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000174 struct txbd8 *txbdp;
175 struct rxbd8 *rxbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000176 int i, j;
Anton Vorontsov87283272009-10-12 06:00:39 +0000177
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000178 for (i = 0; i < priv->num_tx_queues; i++) {
179 tx_queue = priv->tx_queue[i];
180 /* Initialize some variables in our dev structure */
181 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
182 tx_queue->dirty_tx = tx_queue->tx_bd_base;
183 tx_queue->cur_tx = tx_queue->tx_bd_base;
184 tx_queue->skb_curtx = 0;
185 tx_queue->skb_dirtytx = 0;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000186
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000187 /* Initialize Transmit Descriptor Ring */
188 txbdp = tx_queue->tx_bd_base;
189 for (j = 0; j < tx_queue->tx_ring_size; j++) {
190 txbdp->lstatus = 0;
191 txbdp->bufPtr = 0;
192 txbdp++;
Anton Vorontsov87283272009-10-12 06:00:39 +0000193 }
194
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000195 /* Set the last descriptor in the ring to indicate wrap */
196 txbdp--;
197 txbdp->status |= TXBD_WRAP;
198 }
199
200 for (i = 0; i < priv->num_rx_queues; i++) {
201 rx_queue = priv->rx_queue[i];
202 rx_queue->cur_rx = rx_queue->rx_bd_base;
203 rx_queue->skb_currx = 0;
204 rxbdp = rx_queue->rx_bd_base;
205
206 for (j = 0; j < rx_queue->rx_ring_size; j++) {
207 struct sk_buff *skb = rx_queue->rx_skbuff[j];
208
209 if (skb) {
210 gfar_init_rxbdp(rx_queue, rxbdp,
211 rxbdp->bufPtr);
212 } else {
213 skb = gfar_new_skb(ndev);
214 if (!skb) {
215 pr_err("%s: Can't allocate RX buffers\n",
216 ndev->name);
217 goto err_rxalloc_fail;
218 }
219 rx_queue->rx_skbuff[j] = skb;
220
221 gfar_new_rxbdp(rx_queue, rxbdp, skb);
222 }
223
224 rxbdp++;
225 }
226
Anton Vorontsov87283272009-10-12 06:00:39 +0000227 }
228
229 return 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000230
231err_rxalloc_fail:
232 free_skb_resources(priv);
233 return -ENOMEM;
Anton Vorontsov87283272009-10-12 06:00:39 +0000234}
235
236static int gfar_alloc_skb_resources(struct net_device *ndev)
237{
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000238 void *vaddr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000239 dma_addr_t addr;
240 int i, j, k;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000241 struct gfar_private *priv = netdev_priv(ndev);
242 struct device *dev = &priv->ofdev->dev;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000243 struct gfar_priv_tx_q *tx_queue = NULL;
244 struct gfar_priv_rx_q *rx_queue = NULL;
245
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000246 priv->total_tx_ring_size = 0;
247 for (i = 0; i < priv->num_tx_queues; i++)
248 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
249
250 priv->total_rx_ring_size = 0;
251 for (i = 0; i < priv->num_rx_queues; i++)
252 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000253
254 /* Allocate memory for the buffer descriptors */
Anton Vorontsov87283272009-10-12 06:00:39 +0000255 vaddr = dma_alloc_coherent(dev,
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000256 sizeof(struct txbd8) * priv->total_tx_ring_size +
257 sizeof(struct rxbd8) * priv->total_rx_ring_size,
258 &addr, GFP_KERNEL);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000259 if (!vaddr) {
260 if (netif_msg_ifup(priv))
261 pr_err("%s: Could not allocate buffer descriptors!\n",
262 ndev->name);
263 return -ENOMEM;
264 }
265
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000266 for (i = 0; i < priv->num_tx_queues; i++) {
267 tx_queue = priv->tx_queue[i];
268 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
269 tx_queue->tx_bd_dma_base = addr;
270 tx_queue->dev = ndev;
271 /* enet DMA only understands physical addresses */
272 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
273 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
274 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000275
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000276 /* Start the rx descriptor ring where the tx ring leaves off */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000277 for (i = 0; i < priv->num_rx_queues; i++) {
278 rx_queue = priv->rx_queue[i];
279 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
280 rx_queue->rx_bd_dma_base = addr;
281 rx_queue->dev = ndev;
282 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
284 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000285
286 /* Setup the skbuff rings */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000287 for (i = 0; i < priv->num_tx_queues; i++) {
288 tx_queue = priv->tx_queue[i];
289 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000290 tx_queue->tx_ring_size, GFP_KERNEL);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000291 if (!tx_queue->tx_skbuff) {
292 if (netif_msg_ifup(priv))
293 pr_err("%s: Could not allocate tx_skbuff\n",
294 ndev->name);
295 goto cleanup;
296 }
297
298 for (k = 0; k < tx_queue->tx_ring_size; k++)
299 tx_queue->tx_skbuff[k] = NULL;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000300 }
301
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000302 for (i = 0; i < priv->num_rx_queues; i++) {
303 rx_queue = priv->rx_queue[i];
304 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000305 rx_queue->rx_ring_size, GFP_KERNEL);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000306
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000307 if (!rx_queue->rx_skbuff) {
308 if (netif_msg_ifup(priv))
309 pr_err("%s: Could not allocate rx_skbuff\n",
310 ndev->name);
311 goto cleanup;
312 }
313
314 for (j = 0; j < rx_queue->rx_ring_size; j++)
315 rx_queue->rx_skbuff[j] = NULL;
316 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000317
Anton Vorontsov87283272009-10-12 06:00:39 +0000318 if (gfar_init_bds(ndev))
319 goto cleanup;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000320
321 return 0;
322
323cleanup:
324 free_skb_resources(priv);
325 return -ENOMEM;
326}
327
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000328static void gfar_init_tx_rx_base(struct gfar_private *priv)
329{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000330 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +0000331 u32 __iomem *baddr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000332 int i;
333
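	/* The per-queue TBASEn/RBASEn registers sit 8 bytes apart, so the
	 * u32 pointer below advances by two for each queue. */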
334 baddr = &regs->tbase0;
335 for (i = 0; i < priv->num_tx_queues; i++) {
336 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
337 baddr += 2;
338 }
339
340 baddr = &regs->rbase0;
341 for (i = 0; i < priv->num_rx_queues; i++) {
342 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
343 baddr += 2;
344 }
345}
346
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000347static void gfar_init_mac(struct net_device *ndev)
348{
349 struct gfar_private *priv = netdev_priv(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000350 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000351 u32 rctrl = 0;
352 u32 tctrl = 0;
353 u32 attrs = 0;
354
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000355 /* write the tx/rx base registers */
356 gfar_init_tx_rx_base(priv);
Anton Vorontsov32c513b2009-10-12 06:00:36 +0000357
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000358 /* Configure the coalescing support */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000359 gfar_configure_coalescing(priv, 0xFF, 0xFF);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000360
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000361 if (priv->rx_filer_enable) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000362 rctrl |= RCTRL_FILREN;
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000363 /* Program the RIR0 reg with the required distribution */
364 gfar_write(&regs->rir0, DEFAULT_RIR0);
365 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000366
367 if (priv->rx_csum_enable)
368 rctrl |= RCTRL_CHECKSUMMING;
369
370 if (priv->extended_hash) {
371 rctrl |= RCTRL_EXTHASH;
372
373 gfar_clear_exact_match(ndev);
374 rctrl |= RCTRL_EMEN;
375 }
376
377 if (priv->padding) {
378 rctrl &= ~RCTRL_PAL_MASK;
379 rctrl |= RCTRL_PADDING(priv->padding);
380 }
381
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000382 /* Insert receive time stamps into padding alignment bytes */
383 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
384 rctrl &= ~RCTRL_PAL_MASK;
Manfred Rudigier97553f72010-06-11 01:49:05 +0000385 rctrl |= RCTRL_PADDING(8);
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000386 priv->padding = 8;
387 }
388
Manfred Rudigier97553f72010-06-11 01:49:05 +0000389 /* Enable HW time stamping if requested from user space */
390 if (priv->hwts_rx_en)
391 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
392
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000393 /* keep vlan related bits if it's enabled */
394 if (priv->vlgrp) {
395 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
396 tctrl |= TCTRL_VLINS;
397 }
398
399 /* Init rctrl based on our settings */
400 gfar_write(&regs->rctrl, rctrl);
401
402 if (ndev->features & NETIF_F_IP_CSUM)
403 tctrl |= TCTRL_INIT_CSUM;
404
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000405 tctrl |= TCTRL_TXSCHED_PRIO;
406
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000407 gfar_write(&regs->tctrl, tctrl);
408
409 /* Set the extraction length and index */
410 attrs = ATTRELI_EL(priv->rx_stash_size) |
411 ATTRELI_EI(priv->rx_stash_index);
412
413 gfar_write(&regs->attreli, attrs);
414
415 /* Start with defaults, and add stashing or locking
416 * depending on the appropriate variables */
417 attrs = ATTR_INIT_SETTINGS;
418
419 if (priv->bd_stash_en)
420 attrs |= ATTR_BDSTASH;
421
422 if (priv->rx_stash_size != 0)
423 attrs |= ATTR_BUFSTASH;
424
425 gfar_write(&regs->attr, attrs);
426
427 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
428 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
429 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
430}
431
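/* Fold the per-queue software counters into the single netdev stats block. */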
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000432static struct net_device_stats *gfar_get_stats(struct net_device *dev)
433{
434 struct gfar_private *priv = netdev_priv(dev);
435 struct netdev_queue *txq;
436 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
437 unsigned long tx_packets = 0, tx_bytes = 0;
438 int i = 0;
439
440 for (i = 0; i < priv->num_rx_queues; i++) {
441 rx_packets += priv->rx_queue[i]->stats.rx_packets;
442 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
443 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
444 }
445
446 dev->stats.rx_packets = rx_packets;
447 dev->stats.rx_bytes = rx_bytes;
448 dev->stats.rx_dropped = rx_dropped;
449
450 for (i = 0; i < priv->num_tx_queues; i++) {
451 txq = netdev_get_tx_queue(dev, i);
452 tx_bytes += txq->tx_bytes;
453 tx_packets += txq->tx_packets;
454 }
455
456 dev->stats.tx_bytes = tx_bytes;
457 dev->stats.tx_packets = tx_packets;
458
459 return &dev->stats;
460}
461
Andy Fleming26ccfc32009-03-10 12:58:28 +0000462static const struct net_device_ops gfar_netdev_ops = {
463 .ndo_open = gfar_enet_open,
464 .ndo_start_xmit = gfar_start_xmit,
465 .ndo_stop = gfar_close,
466 .ndo_change_mtu = gfar_change_mtu,
467 .ndo_set_multicast_list = gfar_set_multi,
468 .ndo_tx_timeout = gfar_timeout,
469 .ndo_do_ioctl = gfar_ioctl,
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000470 .ndo_get_stats = gfar_get_stats,
Andy Fleming26ccfc32009-03-10 12:58:28 +0000471 .ndo_vlan_rx_register = gfar_vlan_rx_register,
Ben Hutchings240c1022009-07-09 17:54:35 +0000472 .ndo_set_mac_address = eth_mac_addr,
473 .ndo_validate_addr = eth_validate_addr,
Andy Fleming26ccfc32009-03-10 12:58:28 +0000474#ifdef CONFIG_NET_POLL_CONTROLLER
475 .ndo_poll_controller = gfar_netpoll,
476#endif
477};
478
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000479unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
480unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
481
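/* Take or release every per-queue lock; callers are expected to have
 * disabled local interrupts first (see gfar_suspend()/gfar_resume()). */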
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000482void lock_rx_qs(struct gfar_private *priv)
483{
484 int i = 0x0;
485
486 for (i = 0; i < priv->num_rx_queues; i++)
487 spin_lock(&priv->rx_queue[i]->rxlock);
488}
489
490void lock_tx_qs(struct gfar_private *priv)
491{
492 int i = 0x0;
493
494 for (i = 0; i < priv->num_tx_queues; i++)
495 spin_lock(&priv->tx_queue[i]->txlock);
496}
497
498void unlock_rx_qs(struct gfar_private *priv)
499{
500 int i = 0x0;
501
502 for (i = 0; i < priv->num_rx_queues; i++)
503 spin_unlock(&priv->rx_queue[i]->rxlock);
504}
505
506void unlock_tx_qs(struct gfar_private *priv)
507{
508 int i = 0x0;
509
510 for (i = 0; i < priv->num_tx_queues; i++)
511 spin_unlock(&priv->tx_queue[i]->txlock);
512}
513
Andy Fleming7f7f5312005-11-11 12:38:59 -0600514/* Returns 1 if incoming frames use an FCB */
515static inline int gfar_uses_fcb(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -0500516{
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000517 return priv->vlgrp || priv->rx_csum_enable ||
518 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
Kumar Gala0bbaf062005-06-20 10:54:21 -0500519}
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400520
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000521static void free_tx_pointers(struct gfar_private *priv)
522{
523 int i = 0;
524
525 for (i = 0; i < priv->num_tx_queues; i++)
526 kfree(priv->tx_queue[i]);
527}
528
529static void free_rx_pointers(struct gfar_private *priv)
530{
531 int i = 0;
532
533 for (i = 0; i < priv->num_rx_queues; i++)
534 kfree(priv->rx_queue[i]);
535}
536
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000537static void unmap_group_regs(struct gfar_private *priv)
538{
539 int i = 0;
540
541 for (i = 0; i < MAXGROUPS; i++)
542 if (priv->gfargrp[i].regs)
543 iounmap(priv->gfargrp[i].regs);
544}
545
546static void disable_napi(struct gfar_private *priv)
547{
548 int i = 0;
549
550 for (i = 0; i < priv->num_grps; i++)
551 napi_disable(&priv->gfargrp[i].napi);
552}
553
554static void enable_napi(struct gfar_private *priv)
555{
556 int i = 0;
557
558 for (i = 0; i < priv->num_grps; i++)
559 napi_enable(&priv->gfargrp[i].napi);
560}
561
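/*
 * On "fsl,etsec2" controllers each register/interrupt group is a child
 * node of the ethernet node; an illustrative fragment of what this parser
 * expects (example values only):
 *
 *	queue-group {
 *		reg = <0x24000 0x1000>;
 *		interrupts = <29 2 30 2 34 2>;	// tx, rx, error
 *		fsl,rx-bit-map = <0xff>;
 *		fsl,tx-bit-map = <0xff>;
 *	};
 */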
562static int gfar_parse_group(struct device_node *np,
563 struct gfar_private *priv, const char *model)
564{
565 u32 *queue_mask;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000566
Anton Vorontsov7ce97d42010-04-23 07:12:44 +0000567 priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000568 if (!priv->gfargrp[priv->num_grps].regs)
569 return -ENOMEM;
570
571 priv->gfargrp[priv->num_grps].interruptTransmit =
572 irq_of_parse_and_map(np, 0);
573
574 /* If we aren't the FEC we have multiple interrupts */
575 if (model && strcasecmp(model, "FEC")) {
576 priv->gfargrp[priv->num_grps].interruptReceive =
577 irq_of_parse_and_map(np, 1);
578 priv->gfargrp[priv->num_grps].interruptError =
579 irq_of_parse_and_map(np,2);
580 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
581 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
582 priv->gfargrp[priv->num_grps].interruptError < 0) {
583 return -EINVAL;
584 }
585 }
586
587 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
588 priv->gfargrp[priv->num_grps].priv = priv;
589 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
590 if (priv->mode == MQ_MG_MODE) {
591 queue_mask = (u32 *)of_get_property(np,
592 "fsl,rx-bit-map", NULL);
593 priv->gfargrp[priv->num_grps].rx_bit_map =
594 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
595 queue_mask = (u32 *)of_get_property(np,
596 "fsl,tx-bit-map", NULL);
597 priv->gfargrp[priv->num_grps].tx_bit_map =
598 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
599 } else {
600 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
601 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
602 }
603 priv->num_grps++;
604
605 return 0;
606}
607
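/*
 * Illustrative ethernet node as consumed below (example values only; real
 * boards provide this in their .dts):
 *
 *	ethernet@24000 {
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		local-mac-address = [ 00 04 9f 00 00 01 ];
 *		phy-handle = <&phy0>;
 *		tbi-handle = <&tbi0>;
 *		phy-connection-type = "rgmii-id";
 *		bd-stash;
 *		rx-stash-len = <96>;
 *		rx-stash-idx = <0>;
 *	};
 */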
Grant Likely2dc11582010-08-06 09:25:50 -0600608static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
Andy Flemingb31a1d82008-12-16 15:29:15 -0800609{
Andy Flemingb31a1d82008-12-16 15:29:15 -0800610 const char *model;
611 const char *ctype;
612 const void *mac_addr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000613 int err = 0, i;
614 struct net_device *dev = NULL;
615 struct gfar_private *priv = NULL;
Grant Likely61c7a082010-04-13 16:12:29 -0700616 struct device_node *np = ofdev->dev.of_node;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000617 struct device_node *child = NULL;
Andy Fleming4d7902f2009-02-04 16:43:44 -0800618 const u32 *stash;
619 const u32 *stash_len;
620 const u32 *stash_idx;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000621 unsigned int num_tx_qs, num_rx_qs;
622 u32 *tx_queues, *rx_queues;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800623
624 if (!np || !of_device_is_available(np))
625 return -ENODEV;
626
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000627 /* parse the num of tx and rx queues */
628 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
629 num_tx_qs = tx_queues ? *tx_queues : 1;
630
631 if (num_tx_qs > MAX_TX_QS) {
632 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
633 num_tx_qs, MAX_TX_QS);
634 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
635 return -EINVAL;
636 }
637
638 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
639 num_rx_qs = rx_queues ? *rx_queues : 1;
640
641 if (num_rx_qs > MAX_RX_QS) {
642 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
643 num_tx_qs, MAX_TX_QS);
644 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
645 return -EINVAL;
646 }
647
648 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
649 dev = *pdev;
650 if (NULL == dev)
651 return -ENOMEM;
652
653 priv = netdev_priv(dev);
Grant Likely61c7a082010-04-13 16:12:29 -0700654 priv->node = ofdev->dev.of_node;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000655 priv->ndev = dev;
656
657 dev->num_tx_queues = num_tx_qs;
658 dev->real_num_tx_queues = num_tx_qs;
659 priv->num_tx_queues = num_tx_qs;
660 priv->num_rx_queues = num_rx_qs;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000661 priv->num_grps = 0x0;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800662
663 model = of_get_property(np, "model", NULL);
664
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000665 for (i = 0; i < MAXGROUPS; i++)
666 priv->gfargrp[i].regs = NULL;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800667
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000668 /* Parse and initialize group specific information */
669 if (of_device_is_compatible(np, "fsl,etsec2")) {
670 priv->mode = MQ_MG_MODE;
671 for_each_child_of_node(np, child) {
672 err = gfar_parse_group(child, priv, model);
673 if (err)
674 goto err_grp_init;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800675 }
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000676 } else {
677 priv->mode = SQ_SG_MODE;
678 err = gfar_parse_group(np, priv, model);
679 if (err)
680 goto err_grp_init;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800681 }
682
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000683 for (i = 0; i < priv->num_tx_queues; i++)
684 priv->tx_queue[i] = NULL;
685 for (i = 0; i < priv->num_rx_queues; i++)
686 priv->rx_queue[i] = NULL;
687
688 for (i = 0; i < priv->num_tx_queues; i++) {
Joe Perchesde47f072010-05-31 17:23:12 +0000689 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
690 GFP_KERNEL);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000691 if (!priv->tx_queue[i]) {
692 err = -ENOMEM;
693 goto tx_alloc_failed;
694 }
695 priv->tx_queue[i]->tx_skbuff = NULL;
696 priv->tx_queue[i]->qindex = i;
697 priv->tx_queue[i]->dev = dev;
698 spin_lock_init(&(priv->tx_queue[i]->txlock));
699 }
700
701 for (i = 0; i < priv->num_rx_queues; i++) {
Joe Perchesde47f072010-05-31 17:23:12 +0000702 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
703 GFP_KERNEL);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000704 if (!priv->rx_queue[i]) {
705 err = -ENOMEM;
706 goto rx_alloc_failed;
707 }
708 priv->rx_queue[i]->rx_skbuff = NULL;
709 priv->rx_queue[i]->qindex = i;
710 priv->rx_queue[i]->dev = dev;
711 spin_lock_init(&(priv->rx_queue[i]->rxlock));
712 }
713
714
Andy Fleming4d7902f2009-02-04 16:43:44 -0800715 stash = of_get_property(np, "bd-stash", NULL);
716
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000717 if (stash) {
Andy Fleming4d7902f2009-02-04 16:43:44 -0800718 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
719 priv->bd_stash_en = 1;
720 }
721
722 stash_len = of_get_property(np, "rx-stash-len", NULL);
723
724 if (stash_len)
725 priv->rx_stash_size = *stash_len;
726
727 stash_idx = of_get_property(np, "rx-stash-idx", NULL);
728
729 if (stash_idx)
730 priv->rx_stash_index = *stash_idx;
731
732 if (stash_len || stash_idx)
733 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
734
Andy Flemingb31a1d82008-12-16 15:29:15 -0800735 mac_addr = of_get_mac_address(np);
736 if (mac_addr)
737 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
738
739 if (model && !strcasecmp(model, "TSEC"))
740 priv->device_flags =
741 FSL_GIANFAR_DEV_HAS_GIGABIT |
742 FSL_GIANFAR_DEV_HAS_COALESCE |
743 FSL_GIANFAR_DEV_HAS_RMON |
744 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
745 if (model && !strcasecmp(model, "eTSEC"))
746 priv->device_flags =
747 FSL_GIANFAR_DEV_HAS_GIGABIT |
748 FSL_GIANFAR_DEV_HAS_COALESCE |
749 FSL_GIANFAR_DEV_HAS_RMON |
750 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
Dai Haruki2c2db482008-12-16 15:31:15 -0800751 FSL_GIANFAR_DEV_HAS_PADDING |
Andy Flemingb31a1d82008-12-16 15:29:15 -0800752 FSL_GIANFAR_DEV_HAS_CSUM |
753 FSL_GIANFAR_DEV_HAS_VLAN |
754 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
Manfred Rudigier97553f72010-06-11 01:49:05 +0000755 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
756 FSL_GIANFAR_DEV_HAS_TIMER;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800757
758 ctype = of_get_property(np, "phy-connection-type", NULL);
759
760 /* We only care about rgmii-id. The rest are autodetected */
761 if (ctype && !strcmp(ctype, "rgmii-id"))
762 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
763 else
764 priv->interface = PHY_INTERFACE_MODE_MII;
765
766 if (of_get_property(np, "fsl,magic-packet", NULL))
767 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
768
Grant Likelyfe192a42009-04-25 12:53:12 +0000769 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800770
771 /* Find the TBI PHY. If it's not there, we don't support SGMII */
Grant Likelyfe192a42009-04-25 12:53:12 +0000772 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800773
774 return 0;
775
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000776rx_alloc_failed:
777 free_rx_pointers(priv);
778tx_alloc_failed:
779 free_tx_pointers(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000780err_grp_init:
781 unmap_group_regs(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000782 free_netdev(dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800783 return err;
784}
785
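/* Handle SIOCSHWTSTAMP: TX time stamping is a simple flag, but toggling RX
 * time stamping restarts the controller, since time stamps are carried in
 * the RX padding bytes and change the frame layout. */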
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000786static int gfar_hwtstamp_ioctl(struct net_device *netdev,
787 struct ifreq *ifr, int cmd)
788{
789 struct hwtstamp_config config;
790 struct gfar_private *priv = netdev_priv(netdev);
791
792 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
793 return -EFAULT;
794
795 /* reserved for future extensions */
796 if (config.flags)
797 return -EINVAL;
798
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +0000799 switch (config.tx_type) {
800 case HWTSTAMP_TX_OFF:
801 priv->hwts_tx_en = 0;
802 break;
803 case HWTSTAMP_TX_ON:
804 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
805 return -ERANGE;
806 priv->hwts_tx_en = 1;
807 break;
808 default:
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000809 return -ERANGE;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +0000810 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000811
812 switch (config.rx_filter) {
813 case HWTSTAMP_FILTER_NONE:
Manfred Rudigier97553f72010-06-11 01:49:05 +0000814 if (priv->hwts_rx_en) {
815 stop_gfar(netdev);
816 priv->hwts_rx_en = 0;
817 startup_gfar(netdev);
818 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000819 break;
820 default:
821 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
822 return -ERANGE;
Manfred Rudigier97553f72010-06-11 01:49:05 +0000823 if (!priv->hwts_rx_en) {
824 stop_gfar(netdev);
825 priv->hwts_rx_en = 1;
826 startup_gfar(netdev);
827 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000828 config.rx_filter = HWTSTAMP_FILTER_ALL;
829 break;
830 }
831
832 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
833 -EFAULT : 0;
834}
835
Clifford Wolf0faac9f2009-01-09 10:23:11 +0000836/* Ioctl MII Interface */
837static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
838{
839 struct gfar_private *priv = netdev_priv(dev);
840
841 if (!netif_running(dev))
842 return -EINVAL;
843
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000844 if (cmd == SIOCSHWTSTAMP)
845 return gfar_hwtstamp_ioctl(dev, rq, cmd);
846
Clifford Wolf0faac9f2009-01-09 10:23:11 +0000847 if (!priv->phydev)
848 return -ENODEV;
849
Richard Cochran28b04112010-07-17 08:48:55 +0000850 return phy_mii_ioctl(priv->phydev, rq, cmd);
Clifford Wolf0faac9f2009-01-09 10:23:11 +0000851}
852
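/* Mirror a queue bit map end for end, e.g. reverse_bitmap(0x80, 8) == 0x01.
 * The hardware maps put queue 0 in the MSB, while the loops below walk
 * queues from bit 0 upward. */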
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000853static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
854{
855 unsigned int new_bit_map = 0x0;
856 int mask = 0x1 << (max_qs - 1), i;
857 for (i = 0; i < max_qs; i++) {
858 if (bit_map & mask)
859 new_bit_map = new_bit_map + (1 << i);
860 mask = mask >> 0x1;
861 }
862 return new_bit_map;
863}
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000864
Anton Vorontsov18294ad2009-11-04 12:53:00 +0000865static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
866 u32 class)
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000867{
868 u32 rqfpr = FPR_FILER_MASK;
869 u32 rqfcr = 0x0;
870
871 rqfar--;
872 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
873 ftp_rqfpr[rqfar] = rqfpr;
874 ftp_rqfcr[rqfar] = rqfcr;
875 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
876
877 rqfar--;
878 rqfcr = RQFCR_CMP_NOMATCH;
879 ftp_rqfpr[rqfar] = rqfpr;
880 ftp_rqfcr[rqfar] = rqfcr;
881 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
882
883 rqfar--;
884 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
885 rqfpr = class;
886 ftp_rqfcr[rqfar] = rqfcr;
887 ftp_rqfpr[rqfar] = rqfpr;
888 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
889
890 rqfar--;
891 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
892 rqfpr = class;
893 ftp_rqfcr[rqfar] = rqfcr;
894 ftp_rqfpr[rqfar] = rqfpr;
895 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
896
897 return rqfar;
898}
899
900static void gfar_init_filer_table(struct gfar_private *priv)
901{
902 int i = 0x0;
903 u32 rqfar = MAX_FILER_IDX;
904 u32 rqfcr = 0x0;
905 u32 rqfpr = FPR_FILER_MASK;
906
907 /* Default rule */
908 rqfcr = RQFCR_CMP_MATCH;
909 ftp_rqfcr[rqfar] = rqfcr;
910 ftp_rqfpr[rqfar] = rqfpr;
911 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
912
913 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
914 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
915 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
916 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
917 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
918 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
919
Uwe Kleine-König85dd08e2010-06-11 12:16:55 +0200920 /* cur_filer_idx indicates the first non-masked rule */
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000921 priv->cur_filer_idx = rqfar;
922
923 /* Rest are masked rules */
924 rqfcr = RQFCR_CMP_NOMATCH;
925 for (i = 0; i < rqfar; i++) {
926 ftp_rqfcr[i] = rqfcr;
927 ftp_rqfpr[i] = rqfpr;
928 gfar_write_filer(priv, i, rqfcr, rqfpr);
929 }
930}
931
Anton Vorontsov7d350972010-06-30 06:39:12 +0000932static void gfar_detect_errata(struct gfar_private *priv)
933{
934 struct device *dev = &priv->ofdev->dev;
935 unsigned int pvr = mfspr(SPRN_PVR);
936 unsigned int svr = mfspr(SPRN_SVR);
937 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
938 unsigned int rev = svr & 0xffff;
939
940 /* MPC8313 Rev 2.0 and higher; All MPC837x */
941 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
942 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
943 priv->errata |= GFAR_ERRATA_74;
944
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +0000945 /* MPC8313 and MPC837x all rev */
946 if ((pvr == 0x80850010 && mod == 0x80b0) ||
947 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
948 priv->errata |= GFAR_ERRATA_76;
949
Anton Vorontsov511d9342010-06-30 06:39:15 +0000950 /* MPC8313 and MPC837x all rev */
951 if ((pvr == 0x80850010 && mod == 0x80b0) ||
952 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
953 priv->errata |= GFAR_ERRATA_A002;
954
Anton Vorontsov7d350972010-06-30 06:39:12 +0000955 if (priv->errata)
956 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
957 priv->errata);
958}
959
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400960/* Set up the ethernet device structure, private data,
961 * and anything else we need before we start */
Grant Likely2dc11582010-08-06 09:25:50 -0600962static int gfar_probe(struct platform_device *ofdev,
Andy Flemingb31a1d82008-12-16 15:29:15 -0800963 const struct of_device_id *match)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964{
965 u32 tempval;
966 struct net_device *dev = NULL;
967 struct gfar_private *priv = NULL;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000968 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000969 int err = 0, i, grp_idx = 0;
Dai Harukic50a5d92008-12-17 16:51:32 -0800970 int len_devname;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000971 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000972 u32 isrg = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +0000973 u32 __iomem *baddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000975 err = gfar_of_init(ofdev, &dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000977 if (err)
978 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979
980 priv = netdev_priv(dev);
Kumar Gala48268572009-03-18 23:28:22 -0700981 priv->ndev = dev;
982 priv->ofdev = ofdev;
Grant Likely61c7a082010-04-13 16:12:29 -0700983 priv->node = ofdev->dev.of_node;
Kumar Gala48268572009-03-18 23:28:22 -0700984 SET_NETDEV_DEV(dev, &ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
Scott Woodd87eb122008-07-11 18:04:45 -0500986 spin_lock_init(&priv->bflock);
Sebastian Siewiorab939902008-08-19 21:12:45 +0200987 INIT_WORK(&priv->reset_task, gfar_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
Andy Flemingb31a1d82008-12-16 15:29:15 -0800989 dev_set_drvdata(&ofdev->dev, priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000990 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
Anton Vorontsov7d350972010-06-30 06:39:12 +0000992 gfar_detect_errata(priv);
993
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994 /* Stop the DMA engine now, in case it was running before */
995 /* (The firmware could have used it, and left it running). */
Andy Fleming257d9382008-12-16 15:25:45 -0800996 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997
998 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000999 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000
Andy Flemingb98ac702009-02-04 16:38:05 -08001001 /* We need to delay at least 3 TX clocks */
1002 udelay(2);
1003
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001005 gfar_write(&regs->maccfg1, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006
1007 /* Initialize MACCFG2. */
Anton Vorontsov7d350972010-06-30 06:39:12 +00001008 tempval = MACCFG2_INIT_SETTINGS;
1009 if (gfar_has_errata(priv, GFAR_ERRATA_74))
1010 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1011 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
1013 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001014 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016 /* Set the dev->base_addr to the gfar reg region */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001017 dev->base_addr = (unsigned long) regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018
Andy Flemingb31a1d82008-12-16 15:29:15 -08001019 SET_NETDEV_DEV(dev, &ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020
1021 /* Fill in the dev structure */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022 dev->watchdog_timeo = TX_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 dev->mtu = 1500;
Andy Fleming26ccfc32009-03-10 12:58:28 +00001024 dev->netdev_ops = &gfar_netdev_ops;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001025 dev->ethtool_ops = &gfar_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001027 /* Register for napi ...We are registering NAPI for each grp */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001028 for (i = 0; i < priv->num_grps; i++)
1029 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001030
Andy Flemingb31a1d82008-12-16 15:29:15 -08001031 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001032 priv->rx_csum_enable = 1;
Dai Haruki4669bc92008-12-17 16:51:04 -08001033 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001034 } else
1035 priv->rx_csum_enable = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036
Kumar Gala0bbaf062005-06-20 10:54:21 -05001037 priv->vlgrp = NULL;
1038
Andy Fleming26ccfc32009-03-10 12:58:28 +00001039 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001040 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001041
Andy Flemingb31a1d82008-12-16 15:29:15 -08001042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001043 priv->extended_hash = 1;
1044 priv->hash_width = 9;
1045
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001046 priv->hash_regs[0] = &regs->igaddr0;
1047 priv->hash_regs[1] = &regs->igaddr1;
1048 priv->hash_regs[2] = &regs->igaddr2;
1049 priv->hash_regs[3] = &regs->igaddr3;
1050 priv->hash_regs[4] = &regs->igaddr4;
1051 priv->hash_regs[5] = &regs->igaddr5;
1052 priv->hash_regs[6] = &regs->igaddr6;
1053 priv->hash_regs[7] = &regs->igaddr7;
1054 priv->hash_regs[8] = &regs->gaddr0;
1055 priv->hash_regs[9] = &regs->gaddr1;
1056 priv->hash_regs[10] = &regs->gaddr2;
1057 priv->hash_regs[11] = &regs->gaddr3;
1058 priv->hash_regs[12] = &regs->gaddr4;
1059 priv->hash_regs[13] = &regs->gaddr5;
1060 priv->hash_regs[14] = &regs->gaddr6;
1061 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001062
1063 } else {
1064 priv->extended_hash = 0;
1065 priv->hash_width = 8;
1066
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001067 priv->hash_regs[0] = &regs->gaddr0;
1068 priv->hash_regs[1] = &regs->gaddr1;
1069 priv->hash_regs[2] = &regs->gaddr2;
1070 priv->hash_regs[3] = &regs->gaddr3;
1071 priv->hash_regs[4] = &regs->gaddr4;
1072 priv->hash_regs[5] = &regs->gaddr5;
1073 priv->hash_regs[6] = &regs->gaddr6;
1074 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001075 }
1076
Andy Flemingb31a1d82008-12-16 15:29:15 -08001077 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001078 priv->padding = DEFAULT_PADDING;
1079 else
1080 priv->padding = 0;
1081
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001082 if (dev->features & NETIF_F_IP_CSUM ||
1083 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001084 dev->hard_header_len += GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001086 /* Program the isrg regs only if number of grps > 1 */
1087 if (priv->num_grps > 1) {
1088 baddr = &regs->isrg0;
1089 for (i = 0; i < priv->num_grps; i++) {
1090 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
1091 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
1092 gfar_write(baddr, isrg);
1093 baddr++;
1094 isrg = 0x0;
1095 }
1096 }
1097
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001098 /* Need to reverse the bit maps as bit_map's MSB is q0
Akinobu Mita984b3f52010-03-05 13:41:37 -08001099 * but for_each_set_bit parses from right to left, which
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001100 * basically reverses the queue numbers */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001101 for (i = 0; i < priv->num_grps; i++) {
1102 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
1103 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1104 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
1105 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1106 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001107
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001108 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1109 * also assign queues to groups */
1110 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1111 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001112 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001113 priv->num_rx_queues) {
1114 priv->gfargrp[grp_idx].num_rx_queues++;
1115 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1116 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1117 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1118 }
1119 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001120 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001121 priv->num_tx_queues) {
1122 priv->gfargrp[grp_idx].num_tx_queues++;
1123 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1124 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1125 tqueue = tqueue | (TQUEUE_EN0 >> i);
1126 }
1127 priv->gfargrp[grp_idx].rstat = rstat;
1128 priv->gfargrp[grp_idx].tstat = tstat;
1129 rstat = tstat = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001130 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001131
1132 gfar_write(&regs->rqueue, rqueue);
1133 gfar_write(&regs->tqueue, tqueue);
1134
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001137 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001138 for (i = 0; i < priv->num_tx_queues; i++) {
1139 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1140 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1141 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1142 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1143 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001144
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001145 for (i = 0; i < priv->num_rx_queues; i++) {
1146 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1147 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1148 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1149 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +00001151 /* enable filer if using multiple RX queues*/
1152 if (priv->num_rx_queues > 1)
1153 priv->rx_filer_enable = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001154 /* Enable most messages by default */
1155 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
1156
Trent Piephod3eab822008-10-02 11:12:24 +00001157 /* Carrier starts down, phylib will bring it up */
1158 netif_carrier_off(dev);
1159
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 err = register_netdev(dev);
1161
1162 if (err) {
1163 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
1164 dev->name);
1165 goto register_fail;
1166 }
1167
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001168 device_init_wakeup(&dev->dev,
1169 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1170
Dai Harukic50a5d92008-12-17 16:51:32 -08001171 /* fill out IRQ number and name fields */
1172 len_devname = strlen(dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001173 for (i = 0; i < priv->num_grps; i++) {
1174 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
1175 len_devname);
1176 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1177 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
1178 "_g", sizeof("_g"));
1179 priv->gfargrp[i].int_name_tx[
1180 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1181 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1182 priv->gfargrp[i].int_name_tx)],
1183 "_tx", sizeof("_tx") + 1);
Dai Harukic50a5d92008-12-17 16:51:32 -08001184
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001185 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1186 len_devname);
1187 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1188 "_g", sizeof("_g"));
1189 priv->gfargrp[i].int_name_rx[
1190 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1191 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1192 priv->gfargrp[i].int_name_rx)],
1193 "_rx", sizeof("_rx") + 1);
Dai Harukic50a5d92008-12-17 16:51:32 -08001194
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001195 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1196 len_devname);
1197 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1198 "_g", sizeof("_g"));
1199 priv->gfargrp[i].int_name_er[strlen(
1200 priv->gfargrp[i].int_name_er)] = i+48;
1201 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1202 priv->gfargrp[i].int_name_er)],
1203 "_er", sizeof("_er") + 1);
1204 } else
1205 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1206 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001207
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001208 /* Initialize the filer table */
1209 gfar_init_filer_table(priv);
1210
Andy Fleming7f7f5312005-11-11 12:38:59 -06001211 /* Create all the sysfs files */
1212 gfar_init_sysfs(dev);
1213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 /* Print out the device info */
Johannes Berge1749612008-10-27 15:59:26 -07001215 printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
1217 /* Even more device info helps when determining which kernel */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001218 /* provided which set of benchmarks. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001220 for (i = 0; i < priv->num_rx_queues; i++)
Kim Phillipsddc01b32010-03-30 11:54:22 +00001221 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001222 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1223 for (i = 0; i < priv->num_tx_queues; i++)
Kim Phillipsddc01b32010-03-30 11:54:22 +00001224 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001225 dev->name, i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
1227 return 0;
1228
1229register_fail:
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001230 unmap_group_regs(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001231 free_tx_pointers(priv);
1232 free_rx_pointers(priv);
Grant Likelyfe192a42009-04-25 12:53:12 +00001233 if (priv->phy_node)
1234 of_node_put(priv->phy_node);
1235 if (priv->tbi_node)
1236 of_node_put(priv->tbi_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 free_netdev(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001238 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239}
1240
Grant Likely2dc11582010-08-06 09:25:50 -06001241static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242{
Andy Flemingb31a1d82008-12-16 15:29:15 -08001243 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Grant Likelyfe192a42009-04-25 12:53:12 +00001245 if (priv->phy_node)
1246 of_node_put(priv->phy_node);
1247 if (priv->tbi_node)
1248 of_node_put(priv->tbi_node);
1249
Andy Flemingb31a1d82008-12-16 15:29:15 -08001250 dev_set_drvdata(&ofdev->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251
David S. Millerd9d8e042009-09-06 01:41:02 -07001252 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001253 unmap_group_regs(priv);
Kumar Gala48268572009-03-18 23:28:22 -07001254 free_netdev(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
1256 return 0;
1257}
1258
Scott Woodd87eb122008-07-11 18:04:45 -05001259#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001260
1261static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001262{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001263 struct gfar_private *priv = dev_get_drvdata(dev);
1264 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001265 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001266 unsigned long flags;
1267 u32 tempval;
1268
1269 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001270 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001271
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001272 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001273
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001274 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001275
1276 local_irq_save(flags);
1277 lock_tx_qs(priv);
1278 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001279
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001280 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001281
1282 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001283 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001284
1285 tempval &= ~MACCFG1_TX_EN;
1286
1287 if (!magic_packet)
1288 tempval &= ~MACCFG1_RX_EN;
1289
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001290 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001291
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001292 unlock_rx_qs(priv);
1293 unlock_tx_qs(priv);
1294 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001295
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001296 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001297
1298 if (magic_packet) {
1299 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001300 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001301
1302 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001303 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001304 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001305 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001306 } else {
1307 phy_stop(priv->phydev);
1308 }
1309 }
1310
1311 return 0;
1312}
1313
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001314static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001315{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001316 struct gfar_private *priv = dev_get_drvdata(dev);
1317 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001318 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001319 unsigned long flags;
1320 u32 tempval;
1321 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001322 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001323
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001324 if (!netif_running(ndev)) {
1325 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001326 return 0;
1327 }
1328
1329 if (!magic_packet && priv->phydev)
1330 phy_start(priv->phydev);
1331
1332 /* Disable Magic Packet mode, in case something
1333 * else woke us up.
1334 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001335 local_irq_save(flags);
1336 lock_tx_qs(priv);
1337 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001338
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001339 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001340 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001341 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001342
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001343 gfar_start(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001344
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001345 unlock_rx_qs(priv);
1346 unlock_tx_qs(priv);
1347 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001348
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001349 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001350
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001351 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001352
1353 return 0;
1354}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001355
1356static int gfar_restore(struct device *dev)
1357{
1358 struct gfar_private *priv = dev_get_drvdata(dev);
1359 struct net_device *ndev = priv->ndev;
1360
1361 if (!netif_running(ndev))
1362 return 0;
1363
1364 gfar_init_bds(ndev);
1365 init_registers(ndev);
1366 gfar_set_mac_address(ndev);
1367 gfar_init_mac(ndev);
1368 gfar_start(ndev);
1369
1370 priv->oldlink = 0;
1371 priv->oldspeed = 0;
1372 priv->oldduplex = -1;
1373
1374 if (priv->phydev)
1375 phy_start(priv->phydev);
1376
1377 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001378 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001379
1380 return 0;
1381}
1382
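/* System sleep callbacks: suspend/resume handle suspend-to-RAM, while
 * freeze/thaw/restore are the hibernation callbacks; restore rebuilds the
 * BD rings and reinitializes the MAC after the image has been loaded. */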
1383static struct dev_pm_ops gfar_pm_ops = {
1384 .suspend = gfar_suspend,
1385 .resume = gfar_resume,
1386 .freeze = gfar_suspend,
1387 .thaw = gfar_resume,
1388 .restore = gfar_restore,
1389};
1390
1391#define GFAR_PM_OPS (&gfar_pm_ops)
1392
Scott Woodd87eb122008-07-11 18:04:45 -05001393#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001394
1395#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001396
Scott Woodd87eb122008-07-11 18:04:45 -05001397#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001399/* Reads the controller's registers to determine what interface
1400 * connects it to the PHY.
1401 */
1402static phy_interface_t gfar_get_interface(struct net_device *dev)
1403{
1404 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001405 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001406 u32 ecntrl;
1407
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001408 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001409
1410 if (ecntrl & ECNTRL_SGMII_MODE)
1411 return PHY_INTERFACE_MODE_SGMII;
1412
1413 if (ecntrl & ECNTRL_TBI_MODE) {
1414 if (ecntrl & ECNTRL_REDUCED_MODE)
1415 return PHY_INTERFACE_MODE_RTBI;
1416 else
1417 return PHY_INTERFACE_MODE_TBI;
1418 }
1419
1420 if (ecntrl & ECNTRL_REDUCED_MODE) {
1421 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1422 return PHY_INTERFACE_MODE_RMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001423 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001424 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001425
1426 /*
1427 * This isn't autodetected right now, so it must
1428 * be set by the device tree or platform code.
1429 */
1430 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1431 return PHY_INTERFACE_MODE_RGMII_ID;
1432
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001433 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001434 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001435 }
1436
Andy Flemingb31a1d82008-12-16 15:29:15 -08001437 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001438 return PHY_INTERFACE_MODE_GMII;
1439
1440 return PHY_INTERFACE_MODE_MII;
1441}
1442
1443
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001444/* Initializes driver's PHY state, and attaches to the PHY.
1445 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 */
1447static int init_phy(struct net_device *dev)
1448{
1449 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001450 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001451 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001452 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001453 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 priv->oldlink = 0;
1456 priv->oldspeed = 0;
1457 priv->oldduplex = -1;
1458
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001459 interface = gfar_get_interface(dev);
1460
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001461 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1462 interface);
1463 if (!priv->phydev)
1464 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1465 interface);
1466 if (!priv->phydev) {
1467 dev_err(&dev->dev, "could not attach to PHY\n");
1468 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001469 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470
Kapil Junejad3c12872007-05-11 18:25:11 -05001471 if (interface == PHY_INTERFACE_MODE_SGMII)
1472 gfar_configure_serdes(dev);
1473
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001474 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001475 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1476 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479}
1480
Paul Gortmakerd0313582008-04-17 00:08:10 -04001481/*
1482 * Initialize TBI PHY interface for communicating with the
1483 * SERDES lynx PHY on the chip. We communicate with this PHY
1484 * through the MDIO bus on each controller, treating it as a
1485 * "normal" PHY at the address found in the TBIPA register. We assume
1486 * that the TBIPA register is valid. Either the MDIO bus code will set
1487 * it to a value that doesn't conflict with other PHYs on the bus, or the
1488 * value doesn't matter, as there are no other PHYs on the bus.
1489 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001490static void gfar_configure_serdes(struct net_device *dev)
1491{
1492 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001493 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001494
Grant Likelyfe192a42009-04-25 12:53:12 +00001495 if (!priv->tbi_node) {
1496 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1497 "device tree specify a tbi-handle\n");
1498 return;
1499 }
1500
1501 tbiphy = of_phy_find_device(priv->tbi_node);
1502 if (!tbiphy) {
1503 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001504 return;
1505 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001506
Andy Flemingb31a1d82008-12-16 15:29:15 -08001507 /*
1508 * If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001509 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1510 * everything for us? Resetting it takes the link down and requires
1511 * several seconds for it to come back.
1512 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001513 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001514 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001515
Paul Gortmakerd0313582008-04-17 00:08:10 -04001516	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001517 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001518
Grant Likelyfe192a42009-04-25 12:53:12 +00001519 phy_write(tbiphy, MII_ADVERTISE,
Kapil Junejad3c12872007-05-11 18:25:11 -05001520 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1521 ADVERTISE_1000XPSE_ASYM);
1522
Grant Likelyfe192a42009-04-25 12:53:12 +00001523 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
Kapil Junejad3c12872007-05-11 18:25:11 -05001524 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1525}
1526
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527static void init_registers(struct net_device *dev)
1528{
1529 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001530 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001531 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001533 for (i = 0; i < priv->num_grps; i++) {
1534 regs = priv->gfargrp[i].regs;
1535 /* Clear IEVENT */
1536 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001538 /* Initialize IMASK */
1539 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1540 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001542 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001544 gfar_write(&regs->igaddr0, 0);
1545 gfar_write(&regs->igaddr1, 0);
1546 gfar_write(&regs->igaddr2, 0);
1547 gfar_write(&regs->igaddr3, 0);
1548 gfar_write(&regs->igaddr4, 0);
1549 gfar_write(&regs->igaddr5, 0);
1550 gfar_write(&regs->igaddr6, 0);
1551 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001553 gfar_write(&regs->gaddr0, 0);
1554 gfar_write(&regs->gaddr1, 0);
1555 gfar_write(&regs->gaddr2, 0);
1556 gfar_write(&regs->gaddr3, 0);
1557 gfar_write(&regs->gaddr4, 0);
1558 gfar_write(&regs->gaddr5, 0);
1559 gfar_write(&regs->gaddr6, 0);
1560 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001563 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001564 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565
1566 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001567 gfar_write(&regs->rmon.cam1, 0xffffffff);
1568 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 }
1570
1571 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001572 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001575 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576}
1577
Anton Vorontsov511d9342010-06-30 06:39:15 +00001578static int __gfar_is_rx_idle(struct gfar_private *priv)
1579{
1580 u32 res;
1581
1582 /*
1583	 * Normally TSEC should not hang on GRS commands, so we should
1584	 * actually wait for the IEVENT_GRSC flag.
1585 */
1586 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1587 return 0;
1588
1589 /*
1590 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1591 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1592 * and the Rx can be safely reset.
1593 */
1594 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1595 res &= 0x7f807f80;
1596 if ((res & 0xffff) == (res >> 16))
1597 return 1;
1598
1599 return 0;
1600}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001601
1602/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001603static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604{
1605 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001606 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001608 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001610 for (i = 0; i < priv->num_grps; i++) {
1611 regs = priv->gfargrp[i].regs;
1612 /* Mask all interrupts */
1613 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001615 /* Clear all interrupts */
1616 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1617 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001619 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001621 tempval = gfar_read(&regs->dmactrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1623 != (DMACTRL_GRS | DMACTRL_GTS)) {
Anton Vorontsov511d9342010-06-30 06:39:15 +00001624 int ret;
1625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001627 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
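		/* Wait until the graceful stop is acknowledged (GRSC/GTSC in
		 * IEVENT); on parts with erratum A002 the Rx half may instead
		 * be declared idle by __gfar_is_rx_idle(). */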
Anton Vorontsov511d9342010-06-30 06:39:15 +00001629 do {
1630 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1631 (IEVENT_GRSC | IEVENT_GTSC)) ==
1632 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1633 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1634 ret = __gfar_is_rx_idle(priv);
1635 } while (!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 }
Scott Woodd87eb122008-07-11 18:04:45 -05001637}
Scott Woodd87eb122008-07-11 18:04:45 -05001638
1639/* Halt the receive and transmit queues */
1640void gfar_halt(struct net_device *dev)
1641{
1642 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001643 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001644 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
Scott Wood2a54adc2008-08-12 15:10:46 -05001646 gfar_halt_nodisable(dev);
1647
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 /* Disable Rx and Tx */
1649 tempval = gfar_read(&regs->maccfg1);
1650 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1651 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001652}
1653
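/* Release the error, transmit and receive IRQs owned by one interrupt group. */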
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001654static void free_grp_irqs(struct gfar_priv_grp *grp)
1655{
1656 free_irq(grp->interruptError, grp);
1657 free_irq(grp->interruptTransmit, grp);
1658 free_irq(grp->interruptReceive, grp);
1659}
1660
Kumar Gala0bbaf062005-06-20 10:54:21 -05001661void stop_gfar(struct net_device *dev)
1662{
1663 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001664 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001665 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001666
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001667 phy_stop(priv->phydev);
1668
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001669
Kumar Gala0bbaf062005-06-20 10:54:21 -05001670 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001671 local_irq_save(flags);
1672 lock_tx_qs(priv);
1673 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001674
Kumar Gala0bbaf062005-06-20 10:54:21 -05001675 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001677 unlock_rx_qs(priv);
1678 unlock_tx_qs(priv);
1679 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
1681 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001682 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001683 for (i = 0; i < priv->num_grps; i++)
1684 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001686 for (i = 0; i < priv->num_grps; i++)
1687 free_irq(priv->gfargrp[i].interruptTransmit,
1688 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 }
1690
1691 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692}
1693
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001694static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001697 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001698 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001700 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001702 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1703 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001704 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705
Kumar Gala48268572009-03-18 23:28:22 -07001706 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001707 txbdp->length, DMA_TO_DEVICE);
1708 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001709 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1710 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001711 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001712 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001713 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001715 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001716 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1717 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001719 kfree(tx_queue->tx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001720}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001722static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1723{
1724 struct rxbd8 *rxbdp;
1725 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1726 int i;
1727
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001728 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001730 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1731 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001732 dma_unmap_single(&priv->ofdev->dev,
1733 rxbdp->bufPtr, priv->rx_buffer_size,
Anton Vorontsove69edd22009-10-12 06:00:30 +00001734 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001735 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1736 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001738 rxbdp->lstatus = 0;
1739 rxbdp->bufPtr = 0;
1740 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001742 kfree(rx_queue->rx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001743}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001744
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001745/* If there are any tx skbs or rx skbs still around, free them.
1746 * Then free tx_skbuff and rx_skbuff */
1747static void free_skb_resources(struct gfar_private *priv)
1748{
1749 struct gfar_priv_tx_q *tx_queue = NULL;
1750 struct gfar_priv_rx_q *rx_queue = NULL;
1751 int i;
1752
1753 /* Go through all the buffer descriptors and free their data buffers */
1754 for (i = 0; i < priv->num_tx_queues; i++) {
1755 tx_queue = priv->tx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001756		if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001757 free_skb_tx_queue(tx_queue);
1758 }
1759
1760 for (i = 0; i < priv->num_rx_queues; i++) {
1761 rx_queue = priv->rx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001762		if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001763 free_skb_rx_queue(rx_queue);
1764 }
1765
1766 dma_free_coherent(&priv->ofdev->dev,
1767 sizeof(struct txbd8) * priv->total_tx_ring_size +
1768 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1769 priv->tx_queue[0]->tx_bd_base,
1770 priv->tx_queue[0]->tx_bd_dma_base);
Sebastian Andrzej Siewior7df9c432010-05-04 22:30:47 +00001771 skb_queue_purge(&priv->rx_recycle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772}
1773
Kumar Gala0bbaf062005-06-20 10:54:21 -05001774void gfar_start(struct net_device *dev)
1775{
1776 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001777 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001778 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001779 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001780
1781 /* Enable Rx and Tx in MACCFG1 */
1782 tempval = gfar_read(&regs->maccfg1);
1783 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1784 gfar_write(&regs->maccfg1, tempval);
1785
1786 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001787 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001788 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001789 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001790
Kumar Gala0bbaf062005-06-20 10:54:21 -05001791 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001792 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001793 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001794 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001795
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001796 for (i = 0; i < priv->num_grps; i++) {
1797 regs = priv->gfargrp[i].regs;
1798 /* Clear THLT/RHLT, so that the DMA starts polling now */
1799 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1800 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1801 /* Unmask the interrupts we look for */
1802 gfar_write(&regs->imask, IMASK_DEFAULT);
1803 }
Dai Haruki12dea572008-12-16 15:30:20 -08001804
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001805 dev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001806}
1807
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001808void gfar_configure_coalescing(struct gfar_private *priv,
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001809 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001811 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001812 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001813 int i = 0;
1814
1815 /* Backward compatible case ---- even if we enable
1816	 * multiple queues, there's only a single reg to program
1817 */
1818 gfar_write(&regs->txic, 0);
1819	if (likely(priv->tx_queue[0]->txcoalescing))
1820 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1821
1822 gfar_write(&regs->rxic, 0);
1823	if (unlikely(priv->rx_queue[0]->rxcoalescing))
1824 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1825
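	/* In multi-group mode each queue has its own coalescing register
	 * (txic0/rxic0 and up); only the queues named in the masks are touched. */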
1826 if (priv->mode == MQ_MG_MODE) {
1827 baddr = &regs->txic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001828 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001829 if (likely(priv->tx_queue[i]->txcoalescing)) {
1830 gfar_write(baddr + i, 0);
1831 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1832 }
1833 }
1834
1835 baddr = &regs->rxic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001836 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001837 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1838 gfar_write(baddr + i, 0);
1839 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1840 }
1841 }
1842 }
1843}
1844
1845static int register_grp_irqs(struct gfar_priv_grp *grp)
1846{
1847 struct gfar_private *priv = grp->priv;
1848 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001849 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 /* If the device has multiple interrupts, register for
1852 * them. Otherwise, only register for the one */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001853 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001854 /* Install our interrupt handlers for Error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 * Transmit, and Receive */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001856 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1857 grp->int_name_er,grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001858 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001859 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1860 dev->name, grp->interruptError);
1861
Julia Lawall2145f1a2010-08-05 10:26:20 +00001862 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 }
1864
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001865 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1866 0, grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001867 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001868 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1869 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 goto tx_irq_fail;
1871 }
1872
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001873 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1874 grp->int_name_rx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001875 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001876 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1877 dev->name, grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 goto rx_irq_fail;
1879 }
1880 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001881 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1882 grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001883 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001884 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1885 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 goto err_irq_fail;
1887 }
1888 }
1889
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001890 return 0;
1891
1892rx_irq_fail:
1893 free_irq(grp->interruptTransmit, grp);
1894tx_irq_fail:
1895 free_irq(grp->interruptError, grp);
1896err_irq_fail:
1897 return err;
1898
1899}
1900
1901/* Bring the controller up and running */
1902int startup_gfar(struct net_device *ndev)
1903{
1904 struct gfar_private *priv = netdev_priv(ndev);
1905 struct gfar __iomem *regs = NULL;
1906 int err, i, j;
1907
1908 for (i = 0; i < priv->num_grps; i++) {
1909		regs = priv->gfargrp[i].regs;
1910 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1911 }
1912
1913	regs = priv->gfargrp[0].regs;
1914 err = gfar_alloc_skb_resources(ndev);
1915 if (err)
1916 return err;
1917
1918 gfar_init_mac(ndev);
1919
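	/* Hook up the per-group IRQs; on failure, unwind the groups that were
	 * already registered before bailing out. */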
1920 for (i = 0; i < priv->num_grps; i++) {
1921 err = register_grp_irqs(&priv->gfargrp[i]);
1922 if (err) {
1923 for (j = 0; j < i; j++)
1924 free_grp_irqs(&priv->gfargrp[j]);
1925 goto irq_fail;
1926 }
1927 }
1928
Andy Fleming7f7f5312005-11-11 12:38:59 -06001929 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001930 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001932 phy_start(priv->phydev);
1933
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001934 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1935
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936 return 0;
1937
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001938irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001939 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 return err;
1941}
1942
1943/* Called when something needs to use the ethernet device */
1944/* Returns 0 for success. */
1945static int gfar_enet_open(struct net_device *dev)
1946{
Li Yang94e8cc32007-10-12 21:53:51 +08001947 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 int err;
1949
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001950 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001951
Andy Fleming0fd56bb2009-02-04 16:43:16 -08001952 skb_queue_head_init(&priv->rx_recycle);
1953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 /* Initialize a bunch of registers */
1955 init_registers(dev);
1956
1957 gfar_set_mac_address(dev);
1958
1959 err = init_phy(dev);
1960
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001961 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001962 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001964 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
1966 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001967 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001968 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001969 return err;
1970 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001972 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001974 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1975
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 return err;
1977}
1978
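/* Prepend a zeroed frame control block (FCB) to the skb; the FCB carries
 * the checksum, VLAN and time-stamp directives consumed by the controller. */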
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001979static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001980{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001981 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001982
1983 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001984
Kumar Gala0bbaf062005-06-20 10:54:21 -05001985 return fcb;
1986}
1987
1988static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1989{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001990 u8 flags = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001991
1992	/* If we're here, it's an IP packet with a TCP or UDP
1993	 * payload. We set it up for checksumming, using a pseudo-header
1994 * we provide
1995 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001996 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001997
Andy Fleming7f7f5312005-11-11 12:38:59 -06001998 /* Tell the controller what the protocol is */
1999 /* And provide the already calculated phcs */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002000 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002001 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03002002 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002003 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05002004 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002005
2006 /* l3os is the distance between the start of the
2007 * frame (skb->data) and the start of the IP hdr.
2008 * l4os is the distance between the start of the
2009 * l3 hdr and the l4 hdr */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002010 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002011 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002012
Andy Fleming7f7f5312005-11-11 12:38:59 -06002013 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002014}
2015
Andy Fleming7f7f5312005-11-11 12:38:59 -06002016void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002017{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002018 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002019 fcb->vlctl = vlan_tx_tag_get(skb);
2020}
2021
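/* Ring-walking helpers: advance a TxBD pointer by 'stride' descriptors,
 * wrapping back to the start of the ring when the end is passed. */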
Dai Haruki4669bc92008-12-17 16:51:04 -08002022static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2023 struct txbd8 *base, int ring_size)
2024{
2025 struct txbd8 *new_bd = bdp + stride;
2026
2027 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2028}
2029
2030static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2031 int ring_size)
2032{
2033 return skip_txbd(bdp, 1, base, ring_size);
2034}
2035
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036/* This is called by the kernel when a frame is ready for transmission. */
2037/* It is pointed to by the dev->hard_start_xmit function pointer */
2038static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2039{
2040 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002041 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002042 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002043 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002044 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002045 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002046 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002047 int i, rq = 0, do_tstamp = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002048 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05002049 unsigned long flags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002050 unsigned int nr_frags, nr_txbds, length;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002051
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00002052 /*
2053 * TOE=1 frames larger than 2500 bytes may see excess delays
2054 * before start of transmission.
2055 */
2056 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2057 skb->ip_summed == CHECKSUM_PARTIAL &&
2058 skb->len > 2500)) {
2059 int ret;
2060
2061 ret = skb_checksum_help(skb);
2062 if (ret)
2063 return ret;
2064 }
2065
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002066 rq = skb->queue_mapping;
2067 tx_queue = priv->tx_queue[rq];
2068 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002069 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002070 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002071
2072 /* check if time stamp should be generated */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002073 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2074 priv->hwts_tx_en))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002075 do_tstamp = 1;
Dai Haruki4669bc92008-12-17 16:51:04 -08002076
Li Yang5b28bea2009-03-27 15:54:30 -07002077 /* make space for additional header when fcb is needed */
2078 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002079 (priv->vlgrp && vlan_tx_tag_present(skb)) ||
2080 unlikely(do_tstamp)) &&
Li Yang5b28bea2009-03-27 15:54:30 -07002081 (skb_headroom(skb) < GMAC_FCB_LEN)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002082 struct sk_buff *skb_new;
2083
2084 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
2085 if (!skb_new) {
2086 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07002087 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002088 return NETDEV_TX_OK;
2089 }
2090 kfree_skb(skb);
2091 skb = skb_new;
2092 }
2093
Dai Haruki4669bc92008-12-17 16:51:04 -08002094 /* total number of fragments in the SKB */
2095 nr_frags = skb_shinfo(skb)->nr_frags;
2096
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002097 /* calculate the required number of TxBDs for this skb */
2098 if (unlikely(do_tstamp))
2099 nr_txbds = nr_frags + 2;
2100 else
2101 nr_txbds = nr_frags + 1;
2102
Dai Haruki4669bc92008-12-17 16:51:04 -08002103 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002104 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002105 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002106 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002107 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002108 return NETDEV_TX_BUSY;
2109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
2111 /* Update transmit stats */
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002112 txq->tx_bytes += skb->len;
2113 txq->tx_packets ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002115 txbdp = txbdp_start = tx_queue->cur_tx;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002116 lstatus = txbdp->lstatus;
2117
2118 /* Time stamp insertion requires one additional TxBD */
2119 if (unlikely(do_tstamp))
2120 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2121 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
Dai Haruki4669bc92008-12-17 16:51:04 -08002123 if (nr_frags == 0) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002124 if (unlikely(do_tstamp))
2125 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2126 TXBD_INTERRUPT);
2127 else
2128 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Dai Haruki4669bc92008-12-17 16:51:04 -08002129 } else {
2130 /* Place the fragment addresses and lengths into the TxBDs */
2131 for (i = 0; i < nr_frags; i++) {
2132 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002133 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134
Dai Haruki4669bc92008-12-17 16:51:04 -08002135 length = skb_shinfo(skb)->frags[i].size;
2136
2137 lstatus = txbdp->lstatus | length |
2138 BD_LFLAG(TXBD_READY);
2139
2140 /* Handle the last BD specially */
2141 if (i == nr_frags - 1)
2142 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2143
Kumar Gala48268572009-03-18 23:28:22 -07002144 bufaddr = dma_map_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002145 skb_shinfo(skb)->frags[i].page,
2146 skb_shinfo(skb)->frags[i].page_offset,
2147 length,
2148 DMA_TO_DEVICE);
2149
2150 /* set the TxBD length and buffer pointer */
2151 txbdp->bufPtr = bufaddr;
2152 txbdp->lstatus = lstatus;
2153 }
2154
2155 lstatus = txbdp_start->lstatus;
2156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
Kumar Gala0bbaf062005-06-20 10:54:21 -05002158 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08002159 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002160 fcb = gfar_add_fcb(skb);
2161 lstatus |= BD_LFLAG(TXBD_TOE);
2162 gfar_tx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002163 }
2164
Dai Haruki77ecaf22008-12-16 15:30:48 -08002165 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002166 if (unlikely(NULL == fcb)) {
2167 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08002168 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002169 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002170
2171 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002172 }
2173
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002174 /* Setup tx hardware time stamping if requested */
2175 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002176 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002177 if (fcb == NULL)
2178 fcb = gfar_add_fcb(skb);
2179 fcb->ptp = 1;
2180 lstatus |= BD_LFLAG(TXBD_TOE);
2181 }
2182
Kumar Gala48268572009-03-18 23:28:22 -07002183 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Dai Haruki4669bc92008-12-17 16:51:04 -08002184 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002186 /*
2187	 * If time stamping is requested, one additional TxBD must be set up. The
2188 * first TxBD points to the FCB and must have a data length of
2189 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2190 * the full frame length.
2191 */
2192 if (unlikely(do_tstamp)) {
2193 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2194 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2195 (skb_headlen(skb) - GMAC_FCB_LEN);
2196 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2197 } else {
2198 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
Dai Haruki4669bc92008-12-17 16:51:04 -08002201 /*
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002202 * We can work in parallel with gfar_clean_tx_ring(), except
2203 * when modifying num_txbdfree. Note that we didn't grab the lock
2204 * when we were reading the num_txbdfree and checking for available
2205	 * space; that's because outside of this function it can only grow,
2206	 * and once we've got the needed space, it cannot suddenly disappear.
2207 *
2208 * The lock also protects us from gfar_error(), which can modify
2209 * regs->tstat and thus retrigger the transfers, which is why we
2210 * also must grab the lock before setting ready bit for the first
2211 * to be transmitted BD.
2212 */
2213 spin_lock_irqsave(&tx_queue->txlock, flags);
2214
2215 /*
Dai Haruki4669bc92008-12-17 16:51:04 -08002216 * The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002217 * semantics (it requires synchronization between cacheable and
2218 * uncacheable mappings, which eieio doesn't provide and which we
2219 * don't need), thus requiring a more expensive sync instruction. At
2220 * some point, the set of architecture-independent barrier functions
2221 * should be expanded to include weaker barriers.
2222 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002223 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002224
Dai Haruki4669bc92008-12-17 16:51:04 -08002225 txbdp_start->lstatus = lstatus;
2226
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002227 eieio(); /* force lstatus write before tx_skbuff */
2228
2229 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2230
Dai Haruki4669bc92008-12-17 16:51:04 -08002231 /* Update the current skb pointer to the next entry we will use
2232 * (wrapping if necessary) */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002233 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2234 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002235
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002236 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002237
2238 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002239 tx_queue->num_txbdfree -= (nr_txbds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
2241 /* If the next BD still needs to be cleaned up, then the bds
2242 are full. We need to tell the kernel to stop sending us stuff. */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002243 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002244 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002246 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 }
2248
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002250 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251
2252 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002253 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002255 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256}
2257
2258/* Stops the kernel queue, and halts the controller */
2259static int gfar_close(struct net_device *dev)
2260{
2261 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002262
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002263 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002264
Sebastian Siewiorab939902008-08-19 21:12:45 +02002265 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 stop_gfar(dev);
2267
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002268 /* Disconnect from the PHY */
2269 phy_disconnect(priv->phydev);
2270 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002272 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
2274 return 0;
2275}
2276
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002278static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002280 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
2282 return 0;
2283}
2284
2285
Kumar Gala0bbaf062005-06-20 10:54:21 -05002286/* Enables and disables VLAN insertion/extraction */
2287static void gfar_vlan_rx_register(struct net_device *dev,
2288 struct vlan_group *grp)
2289{
2290 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002291 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002292 unsigned long flags;
2293 u32 tempval;
2294
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002295 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002296 local_irq_save(flags);
2297 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002298
Anton Vorontsovcd1f55a2009-01-26 14:33:23 -08002299 priv->vlgrp = grp;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002300
2301 if (grp) {
2302 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002303 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002304 tempval |= TCTRL_VLINS;
2305
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002306 gfar_write(&regs->tctrl, tempval);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002307
Kumar Gala0bbaf062005-06-20 10:54:21 -05002308 /* Enable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002309 tempval = gfar_read(&regs->rctrl);
Dai Haruki77ecaf22008-12-16 15:30:48 -08002310 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002311 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002312 } else {
2313 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002314 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002315 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002316 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002317
2318 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002319 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002320 tempval &= ~RCTRL_VLEX;
Dai Haruki77ecaf22008-12-16 15:30:48 -08002321 /* If parse is no longer required, then disable parser */
2322 if (tempval & RCTRL_REQ_PARSER)
2323 tempval |= RCTRL_PRSDEP_INIT;
2324 else
2325 tempval &= ~RCTRL_PRSDEP_INIT;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002326 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002327 }
2328
Dai Haruki77ecaf22008-12-16 15:30:48 -08002329 gfar_change_mtu(dev, dev->mtu);
2330
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002331 unlock_rx_qs(priv);
2332 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002333}
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2336{
2337 int tempsize, tempval;
2338 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002339 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002341 int frame_size = new_mtu + ETH_HLEN;
2342
Dai Haruki77ecaf22008-12-16 15:30:48 -08002343 if (priv->vlgrp)
Dai Harukifaa89572008-03-24 10:53:26 -05002344 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002347 if (netif_msg_drv(priv))
2348 printk(KERN_ERR "%s: Invalid MTU setting\n",
2349 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 return -EINVAL;
2351 }
2352
Dai Haruki77ecaf22008-12-16 15:30:48 -08002353 if (gfar_uses_fcb(priv))
2354 frame_size += GMAC_FCB_LEN;
2355
2356 frame_size += priv->padding;
2357
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 tempsize =
2359 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2360 INCREMENTAL_BUFFER_SIZE;
2361
2362 /* Only stop and start the controller if it isn't already
Andy Fleming7f7f5312005-11-11 12:38:59 -06002363 * stopped, and we changed something */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2365 stop_gfar(dev);
2366
2367 priv->rx_buffer_size = tempsize;
2368
2369 dev->mtu = new_mtu;
2370
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002371 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2372 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373
2374 /* If the mtu is larger than the max size for standard
2375	 * ethernet frames (i.e., a jumbo frame), then set maccfg2
2376 * to allow huge frames, and to check the length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002377 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
Anton Vorontsov7d350972010-06-30 06:39:12 +00002379 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2380 gfar_has_errata(priv, GFAR_ERRATA_74))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2382 else
2383 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2384
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002385 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386
2387 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2388 startup_gfar(dev);
2389
2390 return 0;
2391}
2392
Sebastian Siewiorab939902008-08-19 21:12:45 +02002393/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 * transmitted after a set amount of time.
2395	 * For now, assume that clearing out all the structures and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002396 * starting over will fix the problem.
2397 */
2398static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002400 struct gfar_private *priv = container_of(work, struct gfar_private,
2401 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002402 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403
2404 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002405 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 stop_gfar(dev);
2407 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002408 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 }
2410
David S. Miller263ba322008-07-15 03:47:41 -07002411 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412}
2413
Sebastian Siewiorab939902008-08-19 21:12:45 +02002414static void gfar_timeout(struct net_device *dev)
2415{
2416 struct gfar_private *priv = netdev_priv(dev);
2417
2418 dev->stats.tx_errors++;
2419 schedule_work(&priv->reset_task);
2420}
2421
Eran Libertyacbc0f02010-07-07 15:54:54 -07002422static void gfar_align_skb(struct sk_buff *skb)
2423{
2424 /* We need the data buffer to be aligned properly. We will reserve
2425 * as many bytes as needed to align the data properly
2426 */
2427 skb_reserve(skb, RXBUF_ALIGNMENT -
2428 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2429}
2430
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431/* Interrupt Handler for Transmit complete */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002432static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002434 struct net_device *dev = tx_queue->dev;
Dai Harukid080cd62008-04-09 19:37:51 -05002435 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002436 struct gfar_priv_rx_q *rx_queue = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002437 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002438 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002439 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002440 struct sk_buff *skb;
2441 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002442 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002443 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002444 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002445 int howmany = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002446 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002447 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002449 rx_queue = priv->rx_queue[tx_queue->qindex];
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002450 bdp = tx_queue->dirty_tx;
2451 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002452
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002453 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002454 unsigned long flags;
2455
Dai Haruki4669bc92008-12-17 16:51:04 -08002456 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002457
2458 /*
2459 * When time stamping, one additional TxBD must be freed.
2460 * Also, we need to dma_unmap_single() the TxPAL.
2461 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002462 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002463 nr_txbds = frags + 2;
2464 else
2465 nr_txbds = frags + 1;
2466
2467 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002468
2469 lstatus = lbdp->lstatus;
2470
2471 /* Only clean completed frames */
2472 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2473 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 break;
2475
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002476 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002477 next = next_txbd(bdp, base, tx_ring_size);
2478 buflen = next->length + GMAC_FCB_LEN;
2479 } else
2480 buflen = bdp->length;
2481
2482 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2483 buflen, DMA_TO_DEVICE);
2484
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002485 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002486 struct skb_shared_hwtstamps shhwtstamps;
2487 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2488 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2489 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2490 skb_tstamp_tx(skb, &shhwtstamps);
2491 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2492 bdp = next;
2493 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002494
2495 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2496 bdp = next_txbd(bdp, base, tx_ring_size);
2497
2498 for (i = 0; i < frags; i++) {
Kumar Gala48268572009-03-18 23:28:22 -07002499 dma_unmap_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002500 bdp->bufPtr,
2501 bdp->length,
2502 DMA_TO_DEVICE);
2503 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2504 bdp = next_txbd(bdp, base, tx_ring_size);
2505 }
2506
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002507 /*
2508		 * If there's room in the recycle queue (its length is capped at
2509		 * the rx ring size), add this skb back into the pool if it's the right size
2510 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002511 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002512 skb_recycle_check(skb, priv->rx_buffer_size +
Eran Libertyacbc0f02010-07-07 15:54:54 -07002513 RXBUF_ALIGNMENT)) {
2514 gfar_align_skb(skb);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002515 __skb_queue_head(&priv->rx_recycle, skb);
Eran Libertyacbc0f02010-07-07 15:54:54 -07002516 } else
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002517 dev_kfree_skb_any(skb);
2518
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002519 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002520
2521 skb_dirtytx = (skb_dirtytx + 1) &
2522 TX_RING_MOD_MASK(tx_ring_size);
2523
Dai Harukid080cd62008-04-09 19:37:51 -05002524 howmany++;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002525 spin_lock_irqsave(&tx_queue->txlock, flags);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002526 tx_queue->num_txbdfree += nr_txbds;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002527 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Dai Haruki4669bc92008-12-17 16:51:04 -08002528 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529
Dai Haruki4669bc92008-12-17 16:51:04 -08002530 /* If we freed a buffer, we can restart transmission, if necessary */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002531 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2532 netif_wake_subqueue(dev, tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533
Dai Haruki4669bc92008-12-17 16:51:04 -08002534 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002535 tx_queue->skb_dirtytx = skb_dirtytx;
2536 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
Dai Harukid080cd62008-04-09 19:37:51 -05002538 return howmany;
2539}
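
/*
 * A short sketch of the ring-index arithmetic used above, assuming the ring
 * size is a power of two so TX_RING_MOD_MASK() can reduce the index with a
 * simple mask (values are illustrative): with a 256-entry ring, advancing
 * skb_dirtytx from 255 gives (255 + 1) & 255 == 0, wrapping back to the
 * start of tx_skbuff[].
 */
#if 0	/* illustration only */
static int gfar_next_dirtytx_sketch(int skb_dirtytx, int tx_ring_size)
{
	return (skb_dirtytx + 1) & TX_RING_MOD_MASK(tx_ring_size);
}
#endif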
2540
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002541static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
Dai Haruki8c7396a2008-12-17 16:52:00 -08002542{
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002543 unsigned long flags;
2544
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002545 spin_lock_irqsave(&gfargrp->grplock, flags);
2546 if (napi_schedule_prep(&gfargrp->napi)) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002547 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002548 __napi_schedule(&gfargrp->napi);
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002549 } else {
2550 /*
2551 * Clear IEVENT, so interrupts aren't called again
2552 * because of the packets that have already arrived.
2553 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002554 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002555 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002556 spin_unlock_irqrestore(&gfargrp->grplock, flags);
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002557
Dai Haruki8c7396a2008-12-17 16:52:00 -08002558}
2559
Dai Harukid080cd62008-04-09 19:37:51 -05002560/* Interrupt Handler for Transmit complete */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002561static irqreturn_t gfar_transmit(int irq, void *grp_id)
Dai Harukid080cd62008-04-09 19:37:51 -05002562{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002563 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 return IRQ_HANDLED;
2565}
2566
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002567static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Andy Fleming815b97c2008-04-22 17:18:29 -05002568 struct sk_buff *skb)
2569{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002570 struct net_device *dev = rx_queue->dev;
Andy Fleming815b97c2008-04-22 17:18:29 -05002571 struct gfar_private *priv = netdev_priv(dev);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002572 dma_addr_t buf;
Andy Fleming815b97c2008-04-22 17:18:29 -05002573
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002574 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2575 priv->rx_buffer_size, DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002576 gfar_init_rxbdp(rx_queue, bdp, buf);
Andy Fleming815b97c2008-04-22 17:18:29 -05002577}
2578
Eran Libertyacbc0f02010-07-07 15:54:54 -07002579static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
2580{
2581 struct gfar_private *priv = netdev_priv(dev);
2582 struct sk_buff *skb = NULL;
2583
2584 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2585 if (!skb)
2586 return NULL;
2587
2588 gfar_align_skb(skb);
2589
2590 return skb;
2591}
Andy Fleming815b97c2008-04-22 17:18:29 -05002592
2593struct sk_buff * gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594{
2595 struct gfar_private *priv = netdev_priv(dev);
2596 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002598 skb = __skb_dequeue(&priv->rx_recycle);
2599 if (!skb)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002600 skb = gfar_alloc_skb(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 return skb;
2603}
2604
Li Yang298e1a92007-10-16 14:18:13 +08002605static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606{
Li Yang298e1a92007-10-16 14:18:13 +08002607 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002608 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 struct gfar_extra_stats *estats = &priv->extra_stats;
2610
2611 /* If the packet was truncated, none of the other errors
2612 * matter */
2613 if (status & RXBD_TRUNCATED) {
2614 stats->rx_length_errors++;
2615
2616 estats->rx_trunc++;
2617
2618 return;
2619 }
2620 /* Count the errors, if there were any */
2621 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2622 stats->rx_length_errors++;
2623
2624 if (status & RXBD_LARGE)
2625 estats->rx_large++;
2626 else
2627 estats->rx_short++;
2628 }
2629 if (status & RXBD_NONOCTET) {
2630 stats->rx_frame_errors++;
2631 estats->rx_nonoctet++;
2632 }
2633 if (status & RXBD_CRCERR) {
2634 estats->rx_crcerr++;
2635 stats->rx_crc_errors++;
2636 }
2637 if (status & RXBD_OVERRUN) {
2638 estats->rx_overrun++;
2639 stats->rx_crc_errors++;
2640 }
2641}
2642
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002643irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002645 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 return IRQ_HANDLED;
2647}
2648
Kumar Gala0bbaf062005-06-20 10:54:21 -05002649static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2650{
2651	/* If valid headers were found and valid checksums
2652	 * were verified, tell the kernel that no further
2653	 * checksumming is necessary. Otherwise, leave it to the stack */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002654 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05002655 skb->ip_summed = CHECKSUM_UNNECESSARY;
2656 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002657 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002658}
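
/*
 * A short sketch of the test above, under the assumption that
 * RXFCB_CSUM_MASK covers both the "checksum performed" bits (RXFCB_CIP,
 * RXFCB_CTU) and the corresponding error bits: the equality only holds when
 * both the IP and TCP/UDP checksums were computed by hardware and neither
 * error bit is set, which is exactly when CHECKSUM_UNNECESSARY is safe.
 */
#if 0	/* illustration only */
static int gfar_csum_ok_sketch(u16 flags)
{
	return (flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU);
}
#endif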
2659
2660
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661/* gfar_process_frame() -- handle one incoming packet if skb
2662 * isn't NULL. */
2663static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
Dai Haruki2c2db482008-12-16 15:31:15 -08002664 int amount_pull)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665{
2666 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002667 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668
Dai Haruki2c2db482008-12-16 15:31:15 -08002669 int ret;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002670
Dai Haruki2c2db482008-12-16 15:31:15 -08002671	/* fcb is at the beginning, if it exists */
2672 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
Dai Haruki2c2db482008-12-16 15:31:15 -08002674 /* Remove the FCB from the skb */
2675 /* Remove the padded bytes, if there are any */
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002676 if (amount_pull) {
2677 skb_record_rx_queue(skb, fcb->rq);
Dai Haruki2c2db482008-12-16 15:31:15 -08002678 skb_pull(skb, amount_pull);
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002679 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002680
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002681 /* Get receive timestamp from the skb */
2682 if (priv->hwts_rx_en) {
2683 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2684 u64 *ns = (u64 *) skb->data;
2685 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2686 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2687 }
2688
2689 if (priv->padding)
2690 skb_pull(skb, priv->padding);
2691
Dai Haruki2c2db482008-12-16 15:31:15 -08002692 if (priv->rx_csum_enable)
2693 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002694
Dai Haruki2c2db482008-12-16 15:31:15 -08002695 /* Tell the skb what kind of packet this is */
2696 skb->protocol = eth_type_trans(skb, dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002697
Dai Haruki2c2db482008-12-16 15:31:15 -08002698 /* Send the packet up the stack */
2699 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2700 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2701 else
2702 ret = netif_receive_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
Dai Haruki2c2db482008-12-16 15:31:15 -08002704 if (NET_RX_DROP == ret)
2705 priv->extra_stats.kernel_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706
2707 return 0;
2708}
2709
2710/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Kumar Gala0bbaf062005-06-20 10:54:21 -05002711 * until the budget/quota has been reached. Returns the number
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 * of frames handled
2713 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002714int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002716 struct net_device *dev = rx_queue->dev;
Andy Fleming31de1982008-12-16 15:33:40 -08002717 struct rxbd8 *bdp, *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 struct sk_buff *skb;
Dai Haruki2c2db482008-12-16 15:31:15 -08002719 int pkt_len;
2720 int amount_pull;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 int howmany = 0;
2722 struct gfar_private *priv = netdev_priv(dev);
2723
2724 /* Get the first full descriptor */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002725 bdp = rx_queue->cur_rx;
2726 base = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002728 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
Dai Haruki2c2db482008-12-16 15:31:15 -08002729
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002731 struct sk_buff *newskb;
Scott Wood3b6330c2007-05-16 15:06:59 -05002732 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05002733
2734 /* Add another skb for the future */
2735 newskb = gfar_new_skb(dev);
2736
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002737 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738
Kumar Gala48268572009-03-18 23:28:22 -07002739 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
Andy Fleming81183052008-11-12 10:07:11 -06002740 priv->rx_buffer_size, DMA_FROM_DEVICE);
2741
Anton Vorontsov63b88b92010-06-11 10:51:03 +00002742 if (unlikely(!(bdp->status & RXBD_ERR) &&
2743 bdp->length > priv->rx_buffer_size))
2744 bdp->status = RXBD_LARGE;
2745
Andy Fleming815b97c2008-04-22 17:18:29 -05002746 /* We drop the frame if we failed to allocate a new buffer */
2747 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2748 bdp->status & RXBD_ERR)) {
2749 count_errors(bdp->status, dev);
2750
2751 if (unlikely(!newskb))
2752 newskb = skb;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002753 else if (skb)
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002754 __skb_queue_head(&priv->rx_recycle, skb);
Andy Fleming815b97c2008-04-22 17:18:29 -05002755 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 /* Increment the number of packets */
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002757 rx_queue->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 howmany++;
2759
Dai Haruki2c2db482008-12-16 15:31:15 -08002760 if (likely(skb)) {
2761 pkt_len = bdp->length - ETH_FCS_LEN;
2762 /* Remove the FCS from the packet length */
2763 skb_put(skb, pkt_len);
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002764 rx_queue->stats.rx_bytes += pkt_len;
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002765 skb_record_rx_queue(skb, rx_queue->qindex);
Dai Haruki2c2db482008-12-16 15:31:15 -08002766 gfar_process_frame(dev, skb, amount_pull);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Dai Haruki2c2db482008-12-16 15:31:15 -08002768 } else {
2769 if (netif_msg_rx_err(priv))
2770 printk(KERN_WARNING
2771 "%s: Missing skb!\n", dev->name);
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002772 rx_queue->stats.rx_dropped++;
Dai Haruki2c2db482008-12-16 15:31:15 -08002773 priv->extra_stats.rx_skbmissing++;
2774 }
2775
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 }
2777
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002778 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
Andy Fleming815b97c2008-04-22 17:18:29 -05002780 /* Setup the new bdp */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002781 gfar_new_rxbdp(rx_queue, bdp, newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782
2783 /* Update to the next pointer */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002784 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785
2786 /* update to point at the next skb */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002787 rx_queue->skb_currx =
2788 (rx_queue->skb_currx + 1) &
2789 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 }
2791
2792 /* Update the current rxbd pointer to be the next one */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002793 rx_queue->cur_rx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 return howmany;
2796}
2797
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002798static int gfar_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002800 struct gfar_priv_grp *gfargrp = container_of(napi,
2801 struct gfar_priv_grp, napi);
2802 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002803 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002804 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002805 struct gfar_priv_rx_q *rx_queue = NULL;
2806 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00002807 int tx_cleaned = 0, i, left_over_budget = budget;
2808 unsigned long serviced_queues = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002809 int num_queues = 0;
Dai Harukid080cd62008-04-09 19:37:51 -05002810
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002811 num_queues = gfargrp->num_rx_queues;
2812 budget_per_queue = budget/num_queues;
2813
Dai Haruki8c7396a2008-12-17 16:52:00 -08002814 /* Clear IEVENT, so interrupts aren't called again
2815 * because of the packets that have already arrived */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002816 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002817
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002818 while (num_queues && left_over_budget) {
2819
2820 budget_per_queue = left_over_budget/num_queues;
2821 left_over_budget = 0;
2822
Akinobu Mita984b3f52010-03-05 13:41:37 -08002823 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002824 if (test_bit(i, &serviced_queues))
2825 continue;
2826 rx_queue = priv->rx_queue[i];
2827 tx_queue = priv->tx_queue[rx_queue->qindex];
2828
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002829 tx_cleaned += gfar_clean_tx_ring(tx_queue);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002830 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2831 budget_per_queue);
2832 rx_cleaned += rx_cleaned_per_queue;
2833			if (rx_cleaned_per_queue < budget_per_queue) {
2834 left_over_budget = left_over_budget +
2835 (budget_per_queue - rx_cleaned_per_queue);
2836 set_bit(i, &serviced_queues);
2837 num_queues--;
2838 }
2839 }
Dai Harukid080cd62008-04-09 19:37:51 -05002840 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841
Andy Fleming42199882008-12-17 16:52:30 -08002842 if (tx_cleaned)
2843 return budget;
2844
2845 if (rx_cleaned < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002846 napi_complete(napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847
2848 /* Clear the halt bit in RSTAT */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002849 gfar_write(&regs->rstat, gfargrp->rstat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002851 gfar_write(&regs->imask, IMASK_DEFAULT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
2853 /* If we are coalescing interrupts, update the timer */
2854 /* Otherwise, clear it */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002855 gfar_configure_coalescing(priv,
2856 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 }
2858
Andy Fleming42199882008-12-17 16:52:30 -08002859 return rx_cleaned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860}
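
/*
 * A worked example of the budget split above (queue counts and numbers are
 * purely illustrative): with budget == 64 and two RX queues in the group,
 * each queue is first polled with 32. If queue 0 only cleans 20, the spare
 * 12 go into left_over_budget and queue 0 is marked in serviced_queues; if
 * queue 1 exhausts its 32, the next pass of the while loop hands the
 * remaining 12 (12 / 1 unserviced queue) to queue 1 on top of what it has
 * already cleaned.
 */
#if 0	/* illustration only */
static int gfar_budget_share_sketch(int left_over_budget, int num_queues)
{
	/* per-queue share for the next pass; integer division */
	return num_queues ? left_over_budget / num_queues : 0;
}
#endif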
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002862#ifdef CONFIG_NET_POLL_CONTROLLER
2863/*
2864 * Polling 'interrupt' - used by things like netconsole to send skbs
2865 * without having to re-enable interrupts. It's not called while
2866 * the interrupt routine is executing.
2867 */
2868static void gfar_netpoll(struct net_device *dev)
2869{
2870 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002871 int i = 0;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002872
2873 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002874 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002875 for (i = 0; i < priv->num_grps; i++) {
2876 disable_irq(priv->gfargrp[i].interruptTransmit);
2877 disable_irq(priv->gfargrp[i].interruptReceive);
2878 disable_irq(priv->gfargrp[i].interruptError);
2879 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2880 &priv->gfargrp[i]);
2881 enable_irq(priv->gfargrp[i].interruptError);
2882 enable_irq(priv->gfargrp[i].interruptReceive);
2883 enable_irq(priv->gfargrp[i].interruptTransmit);
2884 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002885 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002886 for (i = 0; i < priv->num_grps; i++) {
2887 disable_irq(priv->gfargrp[i].interruptTransmit);
2888 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2889 &priv->gfargrp[i]);
2890 enable_irq(priv->gfargrp[i].interruptTransmit);
Anton Vorontsov43de0042009-12-09 02:52:19 -08002891 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002892 }
2893}
2894#endif
2895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002897static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002899 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
2901 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002902 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002905 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002906 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
2908 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002909 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002910 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002912 /* Check for errors */
2913 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002914 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915
2916 return IRQ_HANDLED;
2917}
2918
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919/* Called every time the controller might need to be made
2920 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002921 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 * function converts those variables into the appropriate
2923 * register values, and can bring down the device if needed.
2924 */
2925static void adjust_link(struct net_device *dev)
2926{
2927 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002928 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002929 unsigned long flags;
2930 struct phy_device *phydev = priv->phydev;
2931 int new_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002933 local_irq_save(flags);
2934 lock_tx_qs(priv);
2935
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002936 if (phydev->link) {
2937 u32 tempval = gfar_read(&regs->maccfg2);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002938 u32 ecntrl = gfar_read(&regs->ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002939
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 /* Now we make sure that we can be in full duplex mode.
2941 * If not, we operate in half-duplex mode. */
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002942 if (phydev->duplex != priv->oldduplex) {
2943 new_state = 1;
2944 if (!(phydev->duplex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 tempval &= ~(MACCFG2_FULL_DUPLEX);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002946 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 tempval |= MACCFG2_FULL_DUPLEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002949 priv->oldduplex = phydev->duplex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 }
2951
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002952 if (phydev->speed != priv->oldspeed) {
2953 new_state = 1;
2954 switch (phydev->speed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 case 1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 tempval =
2957 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
Li Yangf430e492009-01-06 14:08:10 -08002958
2959 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 break;
2961 case 100:
2962 case 10:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 tempval =
2964 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002965
2966 /* Reduced mode distinguishes
2967 * between 10 and 100 */
2968 if (phydev->speed == SPEED_100)
2969 ecntrl |= ECNTRL_R100;
2970 else
2971 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 break;
2973 default:
Kumar Gala0bbaf062005-06-20 10:54:21 -05002974 if (netif_msg_link(priv))
2975 printk(KERN_WARNING
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002976 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
2977 dev->name, phydev->speed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 break;
2979 }
2980
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002981 priv->oldspeed = phydev->speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 }
2983
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002984 gfar_write(&regs->maccfg2, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002985 gfar_write(&regs->ecntrl, ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002986
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 if (!priv->oldlink) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002988 new_state = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 priv->oldlink = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 }
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002991 } else if (priv->oldlink) {
2992 new_state = 1;
2993 priv->oldlink = 0;
2994 priv->oldspeed = 0;
2995 priv->oldduplex = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002998 if (new_state && netif_msg_link(priv))
2999 phy_print_status(phydev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003000 unlock_tx_qs(priv);
3001 local_irq_restore(flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003002}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003
3004/* Update the hash table based on the current list of multicast
3005 * addresses we subscribe to. Also, change the promiscuity of
3006 * the device based on the flags (this function is called
3007 * whenever dev->flags is changed) */
3008static void gfar_set_multi(struct net_device *dev)
3009{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003010 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003012 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 u32 tempval;
3014
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003015 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 /* Set RCTRL to PROM */
3017 tempval = gfar_read(&regs->rctrl);
3018 tempval |= RCTRL_PROM;
3019 gfar_write(&regs->rctrl, tempval);
3020 } else {
3021 /* Set RCTRL to not PROM */
3022 tempval = gfar_read(&regs->rctrl);
3023 tempval &= ~(RCTRL_PROM);
3024 gfar_write(&regs->rctrl, tempval);
3025 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003026
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003027 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003029 gfar_write(&regs->igaddr0, 0xffffffff);
3030 gfar_write(&regs->igaddr1, 0xffffffff);
3031 gfar_write(&regs->igaddr2, 0xffffffff);
3032 gfar_write(&regs->igaddr3, 0xffffffff);
3033 gfar_write(&regs->igaddr4, 0xffffffff);
3034 gfar_write(&regs->igaddr5, 0xffffffff);
3035 gfar_write(&regs->igaddr6, 0xffffffff);
3036 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 gfar_write(&regs->gaddr0, 0xffffffff);
3038 gfar_write(&regs->gaddr1, 0xffffffff);
3039 gfar_write(&regs->gaddr2, 0xffffffff);
3040 gfar_write(&regs->gaddr3, 0xffffffff);
3041 gfar_write(&regs->gaddr4, 0xffffffff);
3042 gfar_write(&regs->gaddr5, 0xffffffff);
3043 gfar_write(&regs->gaddr6, 0xffffffff);
3044 gfar_write(&regs->gaddr7, 0xffffffff);
3045 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003046 int em_num;
3047 int idx;
3048
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003050 gfar_write(&regs->igaddr0, 0x0);
3051 gfar_write(&regs->igaddr1, 0x0);
3052 gfar_write(&regs->igaddr2, 0x0);
3053 gfar_write(&regs->igaddr3, 0x0);
3054 gfar_write(&regs->igaddr4, 0x0);
3055 gfar_write(&regs->igaddr5, 0x0);
3056 gfar_write(&regs->igaddr6, 0x0);
3057 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 gfar_write(&regs->gaddr0, 0x0);
3059 gfar_write(&regs->gaddr1, 0x0);
3060 gfar_write(&regs->gaddr2, 0x0);
3061 gfar_write(&regs->gaddr3, 0x0);
3062 gfar_write(&regs->gaddr4, 0x0);
3063 gfar_write(&regs->gaddr5, 0x0);
3064 gfar_write(&regs->gaddr6, 0x0);
3065 gfar_write(&regs->gaddr7, 0x0);
3066
Andy Fleming7f7f5312005-11-11 12:38:59 -06003067 /* If we have extended hash tables, we need to
3068 * clear the exact match registers to prepare for
3069 * setting them */
3070 if (priv->extended_hash) {
3071 em_num = GFAR_EM_NUM + 1;
3072 gfar_clear_exact_match(dev);
3073 idx = 1;
3074 } else {
3075 idx = 0;
3076 em_num = 0;
3077 }
3078
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003079 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 return;
3081
3082 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003083 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003084 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003085 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003086 idx++;
3087 } else
Jiri Pirko22bedad32010-04-01 21:22:57 +00003088 gfar_set_hash_for_addr(dev, ha->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089 }
3090 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091}
3092
Andy Fleming7f7f5312005-11-11 12:38:59 -06003093
3094/* Clears each of the exact match registers to zero, so they
3095 * don't interfere with normal reception */
3096static void gfar_clear_exact_match(struct net_device *dev)
3097{
3098 int idx;
3099 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
3100
3101 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
3102 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
3103}
3104
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105/* Set the appropriate hash bit for the given addr */
3106/* The algorithm works like so:
3107 * 1) Take the Destination Address (ie the multicast address), and
3108 * do a CRC on it (little endian), and reverse the bits of the
3109 * result.
3110 * 2) Use the 8 most significant bits as a hash into a 256-entry
3111 * table. The table is controlled through 8 32-bit registers:
3112 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3113 * entry 255. This means that the 3 most significant bits of the
3114 * hash index select which gaddr register to use, and the 5 other bits
3115 * indicate which bit (assuming an IBM numbering scheme, which
3116 * for PowerPC (tm) is usually the case) in the register holds
3117 * the entry. */
3118static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3119{
3120 u32 tempval;
3121 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122 u32 result = ether_crc(MAC_ADDR_LEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003123 int width = priv->hash_width;
3124 u8 whichbit = (result >> (32 - width)) & 0x1f;
3125 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 u32 value = (1 << (31-whichbit));
3127
Kumar Gala0bbaf062005-06-20 10:54:21 -05003128 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003130 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131}
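
/*
 * A worked example of the index split described above, assuming the
 * non-extended 8-bit hash (256 entries); the CRC value is illustrative only.
 * For a reversed CRC result of 0xa3000000 the top 8 bits are 0xa3, so
 * whichreg = 0xa3 >> 5 == 5 (hash register 5), whichbit = 0xa3 & 0x1f == 3,
 * and the value written sets bit 31 - 3 == 28, i.e. 1 << 28.
 */
#if 0	/* illustration only */
static u32 gfar_hash_value_sketch(u32 result, int width, u8 *whichreg)
{
	u8 whichbit = (result >> (32 - width)) & 0x1f;

	*whichreg = result >> (32 - width + 5);
	return 1 << (31 - whichbit);
}
#endif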
3132
Andy Fleming7f7f5312005-11-11 12:38:59 -06003133
3134/* There are multiple MAC Address register pairs on some controllers
3135 * This function sets the num'th pair to a given address
3136 */
3137static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
3138{
3139 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003140 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003141 int idx;
3142 char tmpbuf[MAC_ADDR_LEN];
3143 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003144 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003145
3146 macptr += num*2;
3147
3148	/* Now copy it into the MAC registers backwards, since the */
3149	/* hardware expects the station address bytes in reverse order */
3150 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
3151 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
3152
3153 gfar_write(macptr, *((u32 *) (tmpbuf)));
3154
3155 tempval = *((u32 *) (tmpbuf + 4));
3156
3157 gfar_write(macptr+1, tempval);
3158}
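
/*
 * A small sketch of the byte reversal above, with a hypothetical address
 * used purely for illustration: for 00:04:9f:01:02:03 the reversed buffer
 * becomes 03 02 01 9f 04 00, so the first 32-bit write carries 03 02 01 9f
 * and the second carries 04 00 followed by two padding bytes. The sketch
 * uses an 8-byte scratch buffer so both 32-bit loads stay within bounds;
 * it is not the driver's code path.
 */
#if 0	/* illustration only */
static void gfar_reverse_mac_sketch(const u8 *addr, u8 *out /* 8 bytes */)
{
	int idx;

	memset(out, 0, 8);
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		out[MAC_ADDR_LEN - 1 - idx] = addr[idx];
}
#endif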
3159
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003161static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003163 struct gfar_priv_grp *gfargrp = grp_id;
3164 struct gfar __iomem *regs = gfargrp->regs;
3165 struct gfar_private *priv= gfargrp->priv;
3166 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
3168 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003169 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170
3171 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003172 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05003173
3174 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003175 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05003176 (events & IEVENT_MAG))
3177 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178
3179 /* Hmm... */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003180 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3181 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003182 dev->name, events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183
3184 /* Update the error counters */
3185 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003186 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187
3188 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003189 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003191 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 if (events & IEVENT_XFUN) {
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003193 unsigned long flags;
3194
Kumar Gala0bbaf062005-06-20 10:54:21 -05003195 if (netif_msg_tx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003196 printk(KERN_DEBUG "%s: TX FIFO underrun, "
3197 "packet dropped.\n", dev->name);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003198 dev->stats.tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 priv->extra_stats.tx_underrun++;
3200
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003201 local_irq_save(flags);
3202 lock_tx_qs(priv);
3203
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204 /* Reactivate the Tx Queues */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003205 gfar_write(&regs->tstat, gfargrp->tstat);
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003206
3207 unlock_tx_qs(priv);
3208 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05003210 if (netif_msg_tx_err(priv))
3211 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 }
3213 if (events & IEVENT_BSY) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003214 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 priv->extra_stats.rx_bsy++;
3216
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003217 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
Kumar Gala0bbaf062005-06-20 10:54:21 -05003219 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003220 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003221 dev->name, gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 }
3223 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003224 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 priv->extra_stats.rx_babr++;
3226
Kumar Gala0bbaf062005-06-20 10:54:21 -05003227 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003228 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 }
3230 if (events & IEVENT_EBERR) {
3231 priv->extra_stats.eberr++;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003232 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003233 printk(KERN_DEBUG "%s: bus error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05003235 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003236 printk(KERN_DEBUG "%s: control frame\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237
3238 if (events & IEVENT_BABT) {
3239 priv->extra_stats.tx_babt++;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003240 if (netif_msg_tx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003241 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242 }
3243 return IRQ_HANDLED;
3244}
3245
Andy Flemingb31a1d82008-12-16 15:29:15 -08003246static struct of_device_id gfar_match[] =
3247{
3248 {
3249 .type = "network",
3250 .compatible = "gianfar",
3251 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003252 {
3253 .compatible = "fsl,etsec2",
3254 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003255 {},
3256};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003257MODULE_DEVICE_TABLE(of, gfar_match);
Andy Flemingb31a1d82008-12-16 15:29:15 -08003258
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259/* Structure for a device driver */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003260static struct of_platform_driver gfar_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003261 .driver = {
3262 .name = "fsl-gianfar",
3263 .owner = THIS_MODULE,
3264 .pm = GFAR_PM_OPS,
3265 .of_match_table = gfar_match,
3266 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 .probe = gfar_probe,
3268 .remove = gfar_remove,
3269};
3270
3271static int __init gfar_init(void)
3272{
Andy Fleming1577ece2009-02-04 16:42:12 -08003273 return of_register_platform_driver(&gfar_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274}
3275
3276static void __exit gfar_exit(void)
3277{
Andy Flemingb31a1d82008-12-16 15:29:15 -08003278 of_unregister_platform_driver(&gfar_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279}
3280
3281module_init(gfar_init);
3282module_exit(gfar_exit);
3283