/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
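
/*
 * Illustrative sketch only (not part of the driver): the RX processing
 * described above amounts to walking the ring until a descriptor is
 * still owned by the controller or the budget runs out:
 *
 *	while (!(bdp->status & RXBD_EMPTY) && howmany < budget) {
 *		process_frame(bdp);	// hand the skb up the stack
 *		attach_new_skb(bdp);	// refill the descriptor
 *		bdp = next_bd(bdp);	// next_bd() honours RXBD_WRAP
 *		howmany++;
 *	}
 *
 * process_frame(), attach_new_skb() and next_bd() are hypothetical
 * helpers named here for exposition; the real work is done in
 * gfar_clean_rx_ring() and friends below.
 */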

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT	(1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

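	/*
	 * Note (added): eieio() is a PowerPC store-ordering barrier.  It
	 * ensures the buffer pointer written above is visible to the DMA
	 * engine before the lstatus write below hands the descriptor back
	 * to the controller (RXBD_EMPTY set).
	 */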
	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
					       ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

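/*
 * Note (added): tbase0/rbase0 below are the first of the per-queue BD
 * base registers.  Judging from the "baddr += 2" stride on a u32
 * pointer, consecutive TBASEn/RBASEn registers are assumed to sit
 * 8 bytes apart in the register block.
 */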
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
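
/*
 * Illustrative device-tree fragment (a sketch; the exact node layout
 * is an assumption, not taken from this file) showing the properties
 * gfar_of_init() and gfar_parse_group() look for on an "fsl,etsec2"
 * node with per-group children:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		fsl,num_rx_queues = <0x8>;
 *		fsl,num_tx_queues = <0x8>;
 *
 *		queue-group@0 {
 *			fsl,rx-bit-map = <0xff>;
 *			fsl,tx-bit-map = <0xff>;
 *		};
 *	};
 */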

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
643 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
644 return -EINVAL;
645 }
646
647 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
648 dev = *pdev;
649 if (NULL == dev)
650 return -ENOMEM;
651
652 priv = netdev_priv(dev);
Grant Likely61c7a082010-04-13 16:12:29 -0700653 priv->node = ofdev->dev.of_node;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000654 priv->ndev = dev;
655
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000656 priv->num_tx_queues = num_tx_qs;
Ben Hutchingsfe069122010-09-27 08:27:37 +0000657 netif_set_real_num_rx_queues(dev, num_rx_qs);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000658 priv->num_rx_queues = num_rx_qs;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000659 priv->num_grps = 0x0;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800660
661 model = of_get_property(np, "model", NULL);
662
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000663 for (i = 0; i < MAXGROUPS; i++)
664 priv->gfargrp[i].regs = NULL;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800665
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000666 /* Parse and initialize group specific information */
667 if (of_device_is_compatible(np, "fsl,etsec2")) {
668 priv->mode = MQ_MG_MODE;
669 for_each_child_of_node(np, child) {
670 err = gfar_parse_group(child, priv, model);
671 if (err)
672 goto err_grp_init;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800673 }
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000674 } else {
675 priv->mode = SQ_SG_MODE;
676 err = gfar_parse_group(np, priv, model);
677 if(err)
678 goto err_grp_init;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800679 }
680
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000681 for (i = 0; i < priv->num_tx_queues; i++)
682 priv->tx_queue[i] = NULL;
683 for (i = 0; i < priv->num_rx_queues; i++)
684 priv->rx_queue[i] = NULL;
685
686 for (i = 0; i < priv->num_tx_queues; i++) {
Joe Perchesde47f072010-05-31 17:23:12 +0000687 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
688 GFP_KERNEL);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000689 if (!priv->tx_queue[i]) {
690 err = -ENOMEM;
691 goto tx_alloc_failed;
692 }
693 priv->tx_queue[i]->tx_skbuff = NULL;
694 priv->tx_queue[i]->qindex = i;
695 priv->tx_queue[i]->dev = dev;
696 spin_lock_init(&(priv->tx_queue[i]->txlock));
697 }
698
699 for (i = 0; i < priv->num_rx_queues; i++) {
Joe Perchesde47f072010-05-31 17:23:12 +0000700 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
701 GFP_KERNEL);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000702 if (!priv->rx_queue[i]) {
703 err = -ENOMEM;
704 goto rx_alloc_failed;
705 }
706 priv->rx_queue[i]->rx_skbuff = NULL;
707 priv->rx_queue[i]->qindex = i;
708 priv->rx_queue[i]->dev = dev;
709 spin_lock_init(&(priv->rx_queue[i]->rxlock));
710 }
711
712
Andy Fleming4d7902f2009-02-04 16:43:44 -0800713 stash = of_get_property(np, "bd-stash", NULL);
714
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000715 if (stash) {
Andy Fleming4d7902f2009-02-04 16:43:44 -0800716 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
717 priv->bd_stash_en = 1;
718 }
719
720 stash_len = of_get_property(np, "rx-stash-len", NULL);
721
722 if (stash_len)
723 priv->rx_stash_size = *stash_len;
724
725 stash_idx = of_get_property(np, "rx-stash-idx", NULL);
726
727 if (stash_idx)
728 priv->rx_stash_index = *stash_idx;
729
730 if (stash_len || stash_idx)
731 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
732
Andy Flemingb31a1d82008-12-16 15:29:15 -0800733 mac_addr = of_get_mac_address(np);
734 if (mac_addr)
735 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
736
737 if (model && !strcasecmp(model, "TSEC"))
738 priv->device_flags =
739 FSL_GIANFAR_DEV_HAS_GIGABIT |
740 FSL_GIANFAR_DEV_HAS_COALESCE |
741 FSL_GIANFAR_DEV_HAS_RMON |
742 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
743 if (model && !strcasecmp(model, "eTSEC"))
744 priv->device_flags =
745 FSL_GIANFAR_DEV_HAS_GIGABIT |
746 FSL_GIANFAR_DEV_HAS_COALESCE |
747 FSL_GIANFAR_DEV_HAS_RMON |
748 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
Dai Haruki2c2db482008-12-16 15:31:15 -0800749 FSL_GIANFAR_DEV_HAS_PADDING |
Andy Flemingb31a1d82008-12-16 15:29:15 -0800750 FSL_GIANFAR_DEV_HAS_CSUM |
751 FSL_GIANFAR_DEV_HAS_VLAN |
752 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
Manfred Rudigier97553f72010-06-11 01:49:05 +0000753 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
754 FSL_GIANFAR_DEV_HAS_TIMER;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800755
756 ctype = of_get_property(np, "phy-connection-type", NULL);
757
758 /* We only care about rgmii-id. The rest are autodetected */
759 if (ctype && !strcmp(ctype, "rgmii-id"))
760 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
761 else
762 priv->interface = PHY_INTERFACE_MODE_MII;
763
764 if (of_get_property(np, "fsl,magic-packet", NULL))
765 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
766
Grant Likelyfe192a42009-04-25 12:53:12 +0000767 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800768
769 /* Find the TBI PHY. If it's not there, we don't support SGMII */
Grant Likelyfe192a42009-04-25 12:53:12 +0000770 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800771
772 return 0;
773
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000774rx_alloc_failed:
775 free_rx_pointers(priv);
776tx_alloc_failed:
777 free_tx_pointers(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000778err_grp_init:
779 unmap_group_regs(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000780 free_netdev(dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800781 return err;
782}
783
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000784static int gfar_hwtstamp_ioctl(struct net_device *netdev,
785 struct ifreq *ifr, int cmd)
786{
787 struct hwtstamp_config config;
788 struct gfar_private *priv = netdev_priv(netdev);
789
790 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
791 return -EFAULT;
792
793 /* reserved for future extensions */
794 if (config.flags)
795 return -EINVAL;
796
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +0000797 switch (config.tx_type) {
798 case HWTSTAMP_TX_OFF:
799 priv->hwts_tx_en = 0;
800 break;
801 case HWTSTAMP_TX_ON:
802 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
803 return -ERANGE;
804 priv->hwts_tx_en = 1;
805 break;
806 default:
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000807 return -ERANGE;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +0000808 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000809
810 switch (config.rx_filter) {
811 case HWTSTAMP_FILTER_NONE:
Manfred Rudigier97553f72010-06-11 01:49:05 +0000812 if (priv->hwts_rx_en) {
813 stop_gfar(netdev);
814 priv->hwts_rx_en = 0;
815 startup_gfar(netdev);
816 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000817 break;
818 default:
819 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
820 return -ERANGE;
Manfred Rudigier97553f72010-06-11 01:49:05 +0000821 if (!priv->hwts_rx_en) {
822 stop_gfar(netdev);
823 priv->hwts_rx_en = 1;
824 startup_gfar(netdev);
825 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000826 config.rx_filter = HWTSTAMP_FILTER_ALL;
827 break;
828 }
829
830 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
831 -EFAULT : 0;
832}
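
/*
 * Illustrative user-space usage of the SIOCSHWTSTAMP path above (a
 * sketch, not part of the driver; "eth0" and fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */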

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
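
/*
 * Worked example (added for clarity): with max_qs = 8,
 * reverse_bitmap(0x80, 8) returns 0x01 and reverse_bitmap(0xC0, 8)
 * returns 0x03 -- bit (max_qs - 1) of the input becomes bit 0 of the
 * result, mirroring the whole map.
 */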

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
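
/*
 * Added note: per the "w/o E suffix" comment above, the 0xfff6 mask on
 * the upper SVR half folds the security-engine ("E") part variants
 * together, so e.g. an MPC8313 and an MPC8313E at the same revision are
 * assumed to take the same errata workarounds.
 */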

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* enable filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
				dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
				dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
1357 struct net_device *ndev = priv->ndev;
1358
1359 if (!netif_running(ndev))
1360 return 0;
1361
1362 gfar_init_bds(ndev);
1363 init_registers(ndev);
1364 gfar_set_mac_address(ndev);
1365 gfar_init_mac(ndev);
1366 gfar_start(ndev);
1367
1368 priv->oldlink = 0;
1369 priv->oldspeed = 0;
1370 priv->oldduplex = -1;
1371
1372 if (priv->phydev)
1373 phy_start(priv->phydev);
1374
1375 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001376 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001377
1378 return 0;
1379}
1380
1381static struct dev_pm_ops gfar_pm_ops = {
1382 .suspend = gfar_suspend,
1383 .resume = gfar_resume,
1384 .freeze = gfar_suspend,
1385 .thaw = gfar_resume,
1386 .restore = gfar_restore,
1387};
1388
1389#define GFAR_PM_OPS (&gfar_pm_ops)
1390
Scott Woodd87eb122008-07-11 18:04:45 -05001391#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001392
1393#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001394
Scott Woodd87eb122008-07-11 18:04:45 -05001395#endif
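/*
 * Illustrative sketch (not part of the original file): the dev_pm_ops
 * table above only takes effect once it is referenced from the driver
 * struct's .driver.pm field, roughly as below. The gfar_match table and
 * gfar_probe symbol are assumed from context; only gfar_remove appears
 * in this excerpt.
 */
#if 0
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};
#endif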
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001397/* Reads the controller's registers to determine what interface
1398 * connects it to the PHY.
1399 */
1400static phy_interface_t gfar_get_interface(struct net_device *dev)
1401{
1402 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001403 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001404 u32 ecntrl;
1405
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001406 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001407
1408 if (ecntrl & ECNTRL_SGMII_MODE)
1409 return PHY_INTERFACE_MODE_SGMII;
1410
1411 if (ecntrl & ECNTRL_TBI_MODE) {
1412 if (ecntrl & ECNTRL_REDUCED_MODE)
1413 return PHY_INTERFACE_MODE_RTBI;
1414 else
1415 return PHY_INTERFACE_MODE_TBI;
1416 }
1417
1418 if (ecntrl & ECNTRL_REDUCED_MODE) {
1419 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1420 return PHY_INTERFACE_MODE_RMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001421 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001422 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001423
1424 /*
1425 * This isn't autodetected right now, so it must
1426 * be set by the device tree or platform code.
1427 */
1428 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1429 return PHY_INTERFACE_MODE_RGMII_ID;
1430
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001431 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001432 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001433 }
1434
Andy Flemingb31a1d82008-12-16 15:29:15 -08001435 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001436 return PHY_INTERFACE_MODE_GMII;
1437
1438 return PHY_INTERFACE_MODE_MII;
1439}
1440
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001442/* Initializes driver's PHY state, and attaches to the PHY.
1443 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 */
1445static int init_phy(struct net_device *dev)
1446{
1447 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001448 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001449 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001450 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001451 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453 priv->oldlink = 0;
1454 priv->oldspeed = 0;
1455 priv->oldduplex = -1;
1456
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001457 interface = gfar_get_interface(dev);
1458
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001459 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1460 interface);
1461 if (!priv->phydev)
1462 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1463 interface);
1464 if (!priv->phydev) {
1465 dev_err(&dev->dev, "could not attach to PHY\n");
1466 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001467 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
Kapil Junejad3c12872007-05-11 18:25:11 -05001469 if (interface == PHY_INTERFACE_MODE_SGMII)
1470 gfar_configure_serdes(dev);
1471
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001472 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001473 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1474 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
1476 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477}
1478
Paul Gortmakerd0313582008-04-17 00:08:10 -04001479/*
1480 * Initialize TBI PHY interface for communicating with the
1481 * SERDES lynx PHY on the chip. We communicate with this PHY
1482 * through the MDIO bus on each controller, treating it as a
1483 * "normal" PHY at the address found in the TBIPA register. We assume
1484 * that the TBIPA register is valid. Either the MDIO bus code will set
1485 * it to a value that doesn't conflict with other PHYs on the bus, or the
1486 * value doesn't matter, as there are no other PHYs on the bus.
1487 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001488static void gfar_configure_serdes(struct net_device *dev)
1489{
1490 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001491 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001492
Grant Likelyfe192a42009-04-25 12:53:12 +00001493 if (!priv->tbi_node) {
1494 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1495 "device tree specify a tbi-handle\n");
1496 return;
1497 }
1498
1499 tbiphy = of_phy_find_device(priv->tbi_node);
1500 if (!tbiphy) {
1501 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001502 return;
1503 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001504
Andy Flemingb31a1d82008-12-16 15:29:15 -08001505 /*
1506 * If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001507 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1508 * everything for us? Resetting it takes the link down and requires
1509 * several seconds for it to come back.
1510 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001511 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001512 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001513
Paul Gortmakerd0313582008-04-17 00:08:10 -04001514 /* Single clk mode, mii mode off(for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001515 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001516
Grant Likelyfe192a42009-04-25 12:53:12 +00001517 phy_write(tbiphy, MII_ADVERTISE,
Kapil Junejad3c12872007-05-11 18:25:11 -05001518 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1519 ADVERTISE_1000XPSE_ASYM);
1520
Grant Likelyfe192a42009-04-25 12:53:12 +00001521 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
Kapil Junejad3c12872007-05-11 18:25:11 -05001522 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1523}
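/*
 * For reference, an assumed device-tree fragment (binding details are
 * not taken from this file) showing the tbi-handle wiring that
 * gfar_configure_serdes() relies on: the ethernet node points at the
 * internal TBI PHY on the controller's MDIO bus, e.g.:
 *
 *	tbi0: tbi-phy@11 {
 *		reg = <0x11>;
 *		device_type = "tbi-phy";
 *	};
 *
 *	ethernet@24000 {
 *		...
 *		tbi-handle = <&tbi0>;
 *	};
 *
 * Without such a handle, priv->tbi_node stays NULL and the SGMII setup
 * above bails out with a warning.
 */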
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525static void init_registers(struct net_device *dev)
1526{
1527 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001528 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001529 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001531 for (i = 0; i < priv->num_grps; i++) {
1532 regs = priv->gfargrp[i].regs;
1533 /* Clear IEVENT */
1534 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001536 /* Initialize IMASK */
1537 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1538 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001540 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001542 gfar_write(&regs->igaddr0, 0);
1543 gfar_write(&regs->igaddr1, 0);
1544 gfar_write(&regs->igaddr2, 0);
1545 gfar_write(&regs->igaddr3, 0);
1546 gfar_write(&regs->igaddr4, 0);
1547 gfar_write(&regs->igaddr5, 0);
1548 gfar_write(&regs->igaddr6, 0);
1549 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001551 gfar_write(&regs->gaddr0, 0);
1552 gfar_write(&regs->gaddr1, 0);
1553 gfar_write(&regs->gaddr2, 0);
1554 gfar_write(&regs->gaddr3, 0);
1555 gfar_write(&regs->gaddr4, 0);
1556 gfar_write(&regs->gaddr5, 0);
1557 gfar_write(&regs->gaddr6, 0);
1558 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001561 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001562 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
1564 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001565 gfar_write(&regs->rmon.cam1, 0xffffffff);
1566 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 }
1568
1569 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001570 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001573 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574}
1575
Anton Vorontsov511d9342010-06-30 06:39:15 +00001576static int __gfar_is_rx_idle(struct gfar_private *priv)
1577{
1578 u32 res;
1579
1580 /*
 1581	 * Normally the TSEC should not hang on GRS commands, so we should
 1582	 * actually wait for the IEVENT_GRSC flag.
1583 */
1584 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1585 return 0;
1586
1587 /*
1588 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1589 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1590 * and the Rx can be safely reset.
1591 */
1592 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1593 res &= 0x7f807f80;
1594 if ((res & 0xffff) == (res >> 16))
1595 return 1;
1596
1597 return 0;
1598}
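/*
 * Worked example (illustrative): with res = 0xa5c0a5c0, the 0x7f807f80
 * mask (bits 7-14 and 23-30) leaves 0x25802580; the low halfword 0x2580
 * matches the high halfword 0x2580, so the Rx side is reported idle.
 */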
Kumar Gala0bbaf062005-06-20 10:54:21 -05001599
1600/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001601static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602{
1603 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001604 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001606 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001608 for (i = 0; i < priv->num_grps; i++) {
1609 regs = priv->gfargrp[i].regs;
1610 /* Mask all interrupts */
1611 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001613 /* Clear all interrupts */
1614 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1615 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001617 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001619 tempval = gfar_read(&regs->dmactrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1621 != (DMACTRL_GRS | DMACTRL_GTS)) {
Anton Vorontsov511d9342010-06-30 06:39:15 +00001622 int ret;
1623
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001625 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
Anton Vorontsov511d9342010-06-30 06:39:15 +00001627 do {
1628 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1629 (IEVENT_GRSC | IEVENT_GTSC)) ==
1630 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1631 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1632 ret = __gfar_is_rx_idle(priv);
1633 } while (!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 }
Scott Woodd87eb122008-07-11 18:04:45 -05001635}
Scott Woodd87eb122008-07-11 18:04:45 -05001636
1637/* Halt the receive and transmit queues */
1638void gfar_halt(struct net_device *dev)
1639{
1640 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001641 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001642 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
Scott Wood2a54adc2008-08-12 15:10:46 -05001644 gfar_halt_nodisable(dev);
1645
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 /* Disable Rx and Tx */
1647 tempval = gfar_read(&regs->maccfg1);
1648 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1649 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001650}
1651
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001652static void free_grp_irqs(struct gfar_priv_grp *grp)
1653{
1654 free_irq(grp->interruptError, grp);
1655 free_irq(grp->interruptTransmit, grp);
1656 free_irq(grp->interruptReceive, grp);
1657}
1658
Kumar Gala0bbaf062005-06-20 10:54:21 -05001659void stop_gfar(struct net_device *dev)
1660{
1661 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001662 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001663 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001664
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001665 phy_stop(priv->phydev);
1666
Kumar Gala0bbaf062005-06-20 10:54:21 -05001668 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001669 local_irq_save(flags);
1670 lock_tx_qs(priv);
1671 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001672
Kumar Gala0bbaf062005-06-20 10:54:21 -05001673 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001675 unlock_rx_qs(priv);
1676 unlock_tx_qs(priv);
1677 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001680 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001681 for (i = 0; i < priv->num_grps; i++)
1682 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001684 for (i = 0; i < priv->num_grps; i++)
1685 free_irq(priv->gfargrp[i].interruptTransmit,
1686 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 }
1688
1689 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690}
1691
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001692static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001695 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001696 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001698 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001700 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1701 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001702 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Kumar Gala48268572009-03-18 23:28:22 -07001704 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001705 txbdp->length, DMA_TO_DEVICE);
1706 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001707 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1708 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001709 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001710 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Dai Haruki4669bc92008-12-17 16:51:04 -08001711 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001713 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001714 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1715 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001717 kfree(tx_queue->tx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001718}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001720static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1721{
1722 struct rxbd8 *rxbdp;
1723 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1724 int i;
1725
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001726 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001728 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1729 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001730 dma_unmap_single(&priv->ofdev->dev,
1731 rxbdp->bufPtr, priv->rx_buffer_size,
Anton Vorontsove69edd22009-10-12 06:00:30 +00001732 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001733 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1734 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001736 rxbdp->lstatus = 0;
1737 rxbdp->bufPtr = 0;
1738 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001740 kfree(rx_queue->rx_skbuff);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001741}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001742
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001743/* If there are any tx skbs or rx skbs still around, free them.
1744 * Then free tx_skbuff and rx_skbuff */
1745static void free_skb_resources(struct gfar_private *priv)
1746{
1747 struct gfar_priv_tx_q *tx_queue = NULL;
1748 struct gfar_priv_rx_q *rx_queue = NULL;
1749 int i;
1750
1751 /* Go through all the buffer descriptors and free their data buffers */
1752 for (i = 0; i < priv->num_tx_queues; i++) {
1753 tx_queue = priv->tx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001754		if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001755 free_skb_tx_queue(tx_queue);
1756 }
1757
1758 for (i = 0; i < priv->num_rx_queues; i++) {
1759 rx_queue = priv->rx_queue[i];
Andy Fleming7c0d10d2010-03-29 15:42:23 +00001760		if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001761 free_skb_rx_queue(rx_queue);
1762 }
1763
1764 dma_free_coherent(&priv->ofdev->dev,
1765 sizeof(struct txbd8) * priv->total_tx_ring_size +
1766 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1767 priv->tx_queue[0]->tx_bd_base,
1768 priv->tx_queue[0]->tx_bd_dma_base);
Sebastian Andrzej Siewior7df9c432010-05-04 22:30:47 +00001769 skb_queue_purge(&priv->rx_recycle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770}
1771
Kumar Gala0bbaf062005-06-20 10:54:21 -05001772void gfar_start(struct net_device *dev)
1773{
1774 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001775 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001776 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001777 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001778
1779 /* Enable Rx and Tx in MACCFG1 */
1780 tempval = gfar_read(&regs->maccfg1);
1781 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1782 gfar_write(&regs->maccfg1, tempval);
1783
1784 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001785 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001786 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001787 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001788
Kumar Gala0bbaf062005-06-20 10:54:21 -05001789 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001790 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001791 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001792 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001793
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001794 for (i = 0; i < priv->num_grps; i++) {
1795 regs = priv->gfargrp[i].regs;
1796 /* Clear THLT/RHLT, so that the DMA starts polling now */
1797 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1798 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1799 /* Unmask the interrupts we look for */
1800 gfar_write(&regs->imask, IMASK_DEFAULT);
1801 }
Dai Haruki12dea572008-12-16 15:30:20 -08001802
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001803 dev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001804}
1805
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001806void gfar_configure_coalescing(struct gfar_private *priv,
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001807 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001809 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001810 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001811 int i = 0;
1812
1813 /* Backward compatible case ---- even if we enable
 1814	 * multiple queues, there's only a single reg to program
1815 */
1816 gfar_write(&regs->txic, 0);
 1817	if (likely(priv->tx_queue[0]->txcoalescing))
1818 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1819
1820 gfar_write(&regs->rxic, 0);
 1821	if (unlikely(priv->rx_queue[0]->rxcoalescing))
1822 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1823
1824 if (priv->mode == MQ_MG_MODE) {
1825 baddr = &regs->txic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001826 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001827 if (likely(priv->tx_queue[i]->txcoalescing)) {
1828 gfar_write(baddr + i, 0);
1829 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1830 }
1831 }
1832
1833 baddr = &regs->rxic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001834 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001835 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1836 gfar_write(baddr + i, 0);
1837 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1838 }
1839 }
1840 }
1841}
1842
1843static int register_grp_irqs(struct gfar_priv_grp *grp)
1844{
1845 struct gfar_private *priv = grp->priv;
1846 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001847 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 /* If the device has multiple interrupts, register for
1850 * them. Otherwise, only register for the one */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001851 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001852 /* Install our interrupt handlers for Error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 * Transmit, and Receive */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001854 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1855 grp->int_name_er,grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001856 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001857 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1858 dev->name, grp->interruptError);
1859
Julia Lawall2145f1a2010-08-05 10:26:20 +00001860 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 }
1862
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001863 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1864 0, grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001865 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001866 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1867 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 goto tx_irq_fail;
1869 }
1870
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001871 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1872 grp->int_name_rx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001873 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001874 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1875 dev->name, grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 goto rx_irq_fail;
1877 }
1878 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001879 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1880 grp->int_name_tx, grp)) < 0) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001881 if (netif_msg_intr(priv))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001882 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1883 dev->name, grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 goto err_irq_fail;
1885 }
1886 }
1887
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001888 return 0;
1889
1890rx_irq_fail:
1891 free_irq(grp->interruptTransmit, grp);
1892tx_irq_fail:
1893 free_irq(grp->interruptError, grp);
1894err_irq_fail:
1895 return err;
1896
1897}
1898
1899/* Bring the controller up and running */
1900int startup_gfar(struct net_device *ndev)
1901{
1902 struct gfar_private *priv = netdev_priv(ndev);
1903 struct gfar __iomem *regs = NULL;
1904 int err, i, j;
1905
1906 for (i = 0; i < priv->num_grps; i++) {
 1907		regs = priv->gfargrp[i].regs;
1908 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1909 }
1910
 1911	regs = priv->gfargrp[0].regs;
1912 err = gfar_alloc_skb_resources(ndev);
1913 if (err)
1914 return err;
1915
1916 gfar_init_mac(ndev);
1917
1918 for (i = 0; i < priv->num_grps; i++) {
1919 err = register_grp_irqs(&priv->gfargrp[i]);
1920 if (err) {
1921 for (j = 0; j < i; j++)
1922 free_grp_irqs(&priv->gfargrp[j]);
Anton Vorontsovff760152011-01-18 02:36:02 +00001923 goto irq_fail;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001924 }
1925 }
1926
Andy Fleming7f7f5312005-11-11 12:38:59 -06001927 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001928 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001930 phy_start(priv->phydev);
1931
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001932 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 return 0;
1935
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001936irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001937 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 return err;
1939}
1940
1941/* Called when something needs to use the ethernet device */
1942/* Returns 0 for success. */
1943static int gfar_enet_open(struct net_device *dev)
1944{
Li Yang94e8cc32007-10-12 21:53:51 +08001945 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 int err;
1947
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001948 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001949
Andy Fleming0fd56bb2009-02-04 16:43:16 -08001950 skb_queue_head_init(&priv->rx_recycle);
1951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 /* Initialize a bunch of registers */
1953 init_registers(dev);
1954
1955 gfar_set_mac_address(dev);
1956
1957 err = init_phy(dev);
1958
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001959 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001960 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001965 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001966 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001967 return err;
1968 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001970 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001972 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 return err;
1975}
1976
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001977static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001978{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001979 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001980
1981 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001982
Kumar Gala0bbaf062005-06-20 10:54:21 -05001983 return fcb;
1984}
1985
1986static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1987{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001988 u8 flags = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001989
 1990	/* If we're here, it's an IP packet with a TCP or UDP
1991 * payload. We set it to checksum, using a pseudo-header
1992 * we provide
1993 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001994 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001995
Andy Fleming7f7f5312005-11-11 12:38:59 -06001996 /* Tell the controller what the protocol is */
1997 /* And provide the already calculated phcs */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001998 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001999 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03002000 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002001 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05002002 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002003
2004 /* l3os is the distance between the start of the
2005 * frame (skb->data) and the start of the IP hdr.
2006 * l4os is the distance between the start of the
2007 * l3 hdr and the l4 hdr */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002008 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002009 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002010
Andy Fleming7f7f5312005-11-11 12:38:59 -06002011 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002012}
2013
Andy Fleming7f7f5312005-11-11 12:38:59 -06002014inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002015{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002016 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002017 fcb->vlctl = vlan_tx_tag_get(skb);
2018}
2019
Dai Haruki4669bc92008-12-17 16:51:04 -08002020static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2021 struct txbd8 *base, int ring_size)
2022{
2023 struct txbd8 *new_bd = bdp + stride;
2024
2025 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2026}
2027
2028static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2029 int ring_size)
2030{
2031 return skip_txbd(bdp, 1, base, ring_size);
2032}
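/*
 * Worked example (illustrative): with ring_size = 8, bdp = base + 6 and
 * stride = 3, bdp + stride = base + 9 lands one past the ring, so
 * skip_txbd() returns base + 9 - 8 = base + 1; the walk wraps instead of
 * running off the end of the BD array.
 */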
2033
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034/* This is called by the kernel when a frame is ready for transmission. */
2035/* It is pointed to by the dev->hard_start_xmit function pointer */
2036static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2037{
2038 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002039 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002040 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002041 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002042 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002043 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002044 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002045 int i, rq = 0, do_tstamp = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002046 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05002047 unsigned long flags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002048 unsigned int nr_frags, nr_txbds, length;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002049
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00002050 /*
2051 * TOE=1 frames larger than 2500 bytes may see excess delays
2052 * before start of transmission.
2053 */
2054 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2055 skb->ip_summed == CHECKSUM_PARTIAL &&
2056 skb->len > 2500)) {
2057 int ret;
2058
2059 ret = skb_checksum_help(skb);
2060 if (ret)
2061 return ret;
2062 }
2063
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002064 rq = skb->queue_mapping;
2065 tx_queue = priv->tx_queue[rq];
2066 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002067 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002068 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002069
2070 /* check if time stamp should be generated */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002071 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2072 priv->hwts_tx_en))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002073 do_tstamp = 1;
Dai Haruki4669bc92008-12-17 16:51:04 -08002074
Li Yang5b28bea2009-03-27 15:54:30 -07002075 /* make space for additional header when fcb is needed */
2076 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
Jesse Grosseab6d182010-10-20 13:56:03 +00002077 vlan_tx_tag_present(skb) ||
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002078 unlikely(do_tstamp)) &&
Li Yang5b28bea2009-03-27 15:54:30 -07002079 (skb_headroom(skb) < GMAC_FCB_LEN)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002080 struct sk_buff *skb_new;
2081
2082 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
2083 if (!skb_new) {
2084 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07002085 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002086 return NETDEV_TX_OK;
2087 }
2088 kfree_skb(skb);
2089 skb = skb_new;
2090 }
2091
Dai Haruki4669bc92008-12-17 16:51:04 -08002092 /* total number of fragments in the SKB */
2093 nr_frags = skb_shinfo(skb)->nr_frags;
2094
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002095 /* calculate the required number of TxBDs for this skb */
2096 if (unlikely(do_tstamp))
2097 nr_txbds = nr_frags + 2;
2098 else
2099 nr_txbds = nr_frags + 1;
2100
Dai Haruki4669bc92008-12-17 16:51:04 -08002101 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002102 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002103 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002104 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002105 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002106 return NETDEV_TX_BUSY;
2107 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109 /* Update transmit stats */
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002110 tx_queue->stats.tx_bytes += skb->len;
2111 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002113 txbdp = txbdp_start = tx_queue->cur_tx;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002114 lstatus = txbdp->lstatus;
2115
2116 /* Time stamp insertion requires one additional TxBD */
2117 if (unlikely(do_tstamp))
2118 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2119 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
Dai Haruki4669bc92008-12-17 16:51:04 -08002121 if (nr_frags == 0) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002122 if (unlikely(do_tstamp))
2123 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2124 TXBD_INTERRUPT);
2125 else
2126 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Dai Haruki4669bc92008-12-17 16:51:04 -08002127 } else {
2128 /* Place the fragment addresses and lengths into the TxBDs */
2129 for (i = 0; i < nr_frags; i++) {
2130 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002131 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Dai Haruki4669bc92008-12-17 16:51:04 -08002133 length = skb_shinfo(skb)->frags[i].size;
2134
2135 lstatus = txbdp->lstatus | length |
2136 BD_LFLAG(TXBD_READY);
2137
2138 /* Handle the last BD specially */
2139 if (i == nr_frags - 1)
2140 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2141
Kumar Gala48268572009-03-18 23:28:22 -07002142 bufaddr = dma_map_page(&priv->ofdev->dev,
Dai Haruki4669bc92008-12-17 16:51:04 -08002143 skb_shinfo(skb)->frags[i].page,
2144 skb_shinfo(skb)->frags[i].page_offset,
2145 length,
2146 DMA_TO_DEVICE);
2147
2148 /* set the TxBD length and buffer pointer */
2149 txbdp->bufPtr = bufaddr;
2150 txbdp->lstatus = lstatus;
2151 }
2152
2153 lstatus = txbdp_start->lstatus;
2154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
Kumar Gala0bbaf062005-06-20 10:54:21 -05002156 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08002157 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002158 fcb = gfar_add_fcb(skb);
2159 lstatus |= BD_LFLAG(TXBD_TOE);
2160 gfar_tx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002161 }
2162
Jesse Grosseab6d182010-10-20 13:56:03 +00002163 if (vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002164 if (unlikely(NULL == fcb)) {
2165 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08002166 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002167 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002168
2169 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002170 }
2171
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002172 /* Setup tx hardware time stamping if requested */
2173 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002174 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002175 if (fcb == NULL)
2176 fcb = gfar_add_fcb(skb);
2177 fcb->ptp = 1;
2178 lstatus |= BD_LFLAG(TXBD_TOE);
2179 }
2180
Kumar Gala48268572009-03-18 23:28:22 -07002181 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Dai Haruki4669bc92008-12-17 16:51:04 -08002182 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002184 /*
 2185	 * If time stamping is requested, one additional TxBD must be set up. The
2186 * first TxBD points to the FCB and must have a data length of
2187 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2188 * the full frame length.
2189 */
2190 if (unlikely(do_tstamp)) {
2191 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2192 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2193 (skb_headlen(skb) - GMAC_FCB_LEN);
2194 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2195 } else {
2196 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2197 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
Dai Haruki4669bc92008-12-17 16:51:04 -08002199 /*
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002200 * We can work in parallel with gfar_clean_tx_ring(), except
2201 * when modifying num_txbdfree. Note that we didn't grab the lock
2202 * when we were reading the num_txbdfree and checking for available
2203 * space, that's because outside of this function it can only grow,
2204 * and once we've got needed space, it cannot suddenly disappear.
2205 *
2206 * The lock also protects us from gfar_error(), which can modify
2207 * regs->tstat and thus retrigger the transfers, which is why we
2208 * also must grab the lock before setting ready bit for the first
2209 * to be transmitted BD.
2210 */
2211 spin_lock_irqsave(&tx_queue->txlock, flags);
2212
2213 /*
Dai Haruki4669bc92008-12-17 16:51:04 -08002214 * The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002215 * semantics (it requires synchronization between cacheable and
2216 * uncacheable mappings, which eieio doesn't provide and which we
2217 * don't need), thus requiring a more expensive sync instruction. At
2218 * some point, the set of architecture-independent barrier functions
2219 * should be expanded to include weaker barriers.
2220 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002221 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002222
Dai Haruki4669bc92008-12-17 16:51:04 -08002223 txbdp_start->lstatus = lstatus;
2224
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002225 eieio(); /* force lstatus write before tx_skbuff */
2226
2227 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2228
Dai Haruki4669bc92008-12-17 16:51:04 -08002229 /* Update the current skb pointer to the next entry we will use
2230 * (wrapping if necessary) */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002231 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2232 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002233
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002234 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002235
2236 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002237 tx_queue->num_txbdfree -= (nr_txbds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
2239 /* If the next BD still needs to be cleaned up, then the bds
2240 are full. We need to tell the kernel to stop sending us stuff. */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002241 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002242 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002244 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 }
2246
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002248 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
2250 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002251 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002253 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254}
2255
2256/* Stops the kernel queue, and halts the controller */
2257static int gfar_close(struct net_device *dev)
2258{
2259 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002260
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002261 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002262
Sebastian Siewiorab939902008-08-19 21:12:45 +02002263 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 stop_gfar(dev);
2265
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002266 /* Disconnect from the PHY */
2267 phy_disconnect(priv->phydev);
2268 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002270 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
2272 return 0;
2273}
2274
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002276static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002278 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
2280 return 0;
2281}
2282
2283
Kumar Gala0bbaf062005-06-20 10:54:21 -05002284/* Enables and disables VLAN insertion/extraction */
2285static void gfar_vlan_rx_register(struct net_device *dev,
2286 struct vlan_group *grp)
2287{
2288 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002289 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002290 unsigned long flags;
2291 u32 tempval;
2292
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002293 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002294 local_irq_save(flags);
2295 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002296
Anton Vorontsovcd1f55a2009-01-26 14:33:23 -08002297 priv->vlgrp = grp;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002298
2299 if (grp) {
2300 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002301 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002302 tempval |= TCTRL_VLINS;
2303
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002304 gfar_write(&regs->tctrl, tempval);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002305
Kumar Gala0bbaf062005-06-20 10:54:21 -05002306 /* Enable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002307 tempval = gfar_read(&regs->rctrl);
Dai Haruki77ecaf22008-12-16 15:30:48 -08002308 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002309 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002310 } else {
2311 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002312 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002313 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002314 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002315
2316 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002317 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002318 tempval &= ~RCTRL_VLEX;
Dai Haruki77ecaf22008-12-16 15:30:48 -08002319 /* If parse is no longer required, then disable parser */
2320 if (tempval & RCTRL_REQ_PARSER)
2321 tempval |= RCTRL_PRSDEP_INIT;
2322 else
2323 tempval &= ~RCTRL_PRSDEP_INIT;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002324 gfar_write(&regs->rctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002325 }
2326
Dai Haruki77ecaf22008-12-16 15:30:48 -08002327 gfar_change_mtu(dev, dev->mtu);
2328
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002329 unlock_rx_qs(priv);
2330 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002331}
2332
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2334{
2335 int tempsize, tempval;
2336 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002337 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002339 int frame_size = new_mtu + ETH_HLEN;
2340
Dai Haruki77ecaf22008-12-16 15:30:48 -08002341 if (priv->vlgrp)
Dai Harukifaa89572008-03-24 10:53:26 -05002342 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002343
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002345 if (netif_msg_drv(priv))
2346 printk(KERN_ERR "%s: Invalid MTU setting\n",
2347 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 return -EINVAL;
2349 }
2350
Dai Haruki77ecaf22008-12-16 15:30:48 -08002351 if (gfar_uses_fcb(priv))
2352 frame_size += GMAC_FCB_LEN;
2353
2354 frame_size += priv->padding;
2355
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 tempsize =
2357 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2358 INCREMENTAL_BUFFER_SIZE;
2359
2360 /* Only stop and start the controller if it isn't already
Andy Fleming7f7f5312005-11-11 12:38:59 -06002361 * stopped, and we changed something */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2363 stop_gfar(dev);
2364
2365 priv->rx_buffer_size = tempsize;
2366
2367 dev->mtu = new_mtu;
2368
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002369 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2370 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
2372 /* If the mtu is larger than the max size for standard
2373 * ethernet frames (ie, a jumbo frame), then set maccfg2
2374 * to allow huge frames, and to check the length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002375 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
Anton Vorontsov7d350972010-06-30 06:39:12 +00002377 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2378 gfar_has_errata(priv, GFAR_ERRATA_74))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2380 else
2381 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2382
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002383 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384
2385 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2386 startup_gfar(dev);
2387
2388 return 0;
2389}
2390
Sebastian Siewiorab939902008-08-19 21:12:45 +02002391/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 * transmitted after a set amount of time.
2393 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002394 * starting over will fix the problem.
2395 */
2396static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002398 struct gfar_private *priv = container_of(work, struct gfar_private,
2399 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002400 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401
2402 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002403 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 stop_gfar(dev);
2405 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002406 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 }
2408
David S. Miller263ba322008-07-15 03:47:41 -07002409 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410}
2411
Sebastian Siewiorab939902008-08-19 21:12:45 +02002412static void gfar_timeout(struct net_device *dev)
2413{
2414 struct gfar_private *priv = netdev_priv(dev);
2415
2416 dev->stats.tx_errors++;
2417 schedule_work(&priv->reset_task);
2418}
2419
Eran Libertyacbc0f02010-07-07 15:54:54 -07002420static void gfar_align_skb(struct sk_buff *skb)
2421{
 2422	/* We need the data buffer to be aligned properly. We will reserve
 2423	 * as many bytes as needed to align the data.
2424 */
2425 skb_reserve(skb, RXBUF_ALIGNMENT -
2426 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2427}
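/*
 * Worked example (illustrative; RXBUF_ALIGNMENT is assumed to be 64 and
 * a power of two): if skb->data ends in 0x28 (40 mod 64), the
 * skb_reserve() above shifts it by 64 - 40 = 24 bytes so the buffer
 * starts on a 64-byte boundary. An already aligned buffer is shifted by
 * a full RXBUF_ALIGNMENT.
 */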
2428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429/* Interrupt Handler for Transmit complete */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002430static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002432 struct net_device *dev = tx_queue->dev;
Dai Harukid080cd62008-04-09 19:37:51 -05002433 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002434 struct gfar_priv_rx_q *rx_queue = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002435 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002436 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002437 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002438 struct sk_buff *skb;
2439 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002440 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002441 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002442 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002443 int howmany = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002444 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002445 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446
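	/* Walk the ring from the oldest outstanding descriptor
	 * (dirty_tx), freeing the DMA mappings and skbs of every frame
	 * the controller has finished sending, and stop at the first
	 * frame that is still owned by the hardware.
	 */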
	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
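			/* The hardware Tx timestamp lives in the TxPAL
			 * area ahead of the frame data: a 64-bit
			 * nanosecond value at the first 8-byte-aligned
			 * address at or below skb->data + 0x10.
			 */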
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the recycle queue (limit it to
		 * rx_ring_size) we add this skb back into the pool,
		 * if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) &&
	    tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	return howmany;
}

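/* Defer RX/TX processing for this interrupt group to NAPI: mask the
 * group's RX/TX interrupts and schedule the poll.  If a poll is already
 * scheduled, just ack the events so the interrupt line is deasserted.
 */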
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

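/* Grab an rx skb from the recycle pool when one is available (the pool
 * is refilled with right-sized skbs by gfar_clean_tx_ring()); otherwise
 * fall back to a fresh, aligned allocation.
 */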
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found and valid checksums were verified,
	 * tell the kernel that no checksumming is necessary; otherwise
	 * the stack must checksum the packet itself.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
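	/* Layout of the received buffer, as handled below:
	 *
	 *   [ RxFCB | hw timestamp (if rx timestamping) | padding | frame ]
	 *
	 * The FCB is pulled off first, the 64-bit nanosecond timestamp
	 * is read in place, and priv->padding (which covers the
	 * timestamp bytes when they are present) is pulled before the
	 * frame is handed up the stack.
	 */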
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb; the padding, if any, is removed
	 * further below */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

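	/* Each pass allocates the replacement skb up front, so a ring
	 * slot is never left without a buffer: if the allocation fails,
	 * the received frame is dropped and its skb reused in place.
	 */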
	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

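		/* The controller can apparently report a frame longer
		 * than the buffer without flagging an error; treat such
		 * descriptors as oversized so they take the error path
		 * below.
		 */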
		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				 bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

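	/* Split the budget evenly over this group's rx queues; whatever
	 * a queue leaves unused is pooled and redistributed over the
	 * queues that are still busy on the next pass.  E.g. with
	 * budget 32 and two queues, each gets 16; if queue 0 only needs
	 * 10, queue 1 gets an extra 6 on the second pass.
	 */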
	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer;
		 * otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);
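	/* Hold the tx queue locks so the MACCFG2/ECNTRL reprogramming
	 * below cannot race with the transmit path.
	 */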

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					    "%s: Ack! Speed (%d) is not 10/100/1000!\n",
					    dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
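		/* Exact-match entry 0 holds the controller's own station
		 * address (macstnaddr), so multicast exact-match entries
		 * start at 1.
		 */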
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index indicate which gaddr register to use, and the other
 * 5 bits indicate which bit (assuming an IBM numbering scheme,
 * which for PowerPC (tm) is usually the case) in the register
 * holds the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

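	/* Example, assuming the 256-entry table (width == 8): whichreg
	 * is the top 3 bits of the reversed CRC (result >> 29) and
	 * whichbit the next 5 ((result >> 24) & 0x1f), spreading
	 * addresses over 8 registers of 32 bits each.
	 */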
	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	/* Padded to 8 bytes so the u32 read at offset 4 below stays
	 * within the buffer (MAC_ADDR_LEN is only 6) */
	char tmpbuf[8] = { 0 };
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the raw error events if debugging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);