/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
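
/*
 * A minimal sketch of the receive walk described above (illustrative
 * pseudo-code only, not part of the driver; names follow the rxbd8
 * layout used below, where the EMPTY and WRAP flags live in lstatus):
 *
 *	bdp = rx_queue->cur_rx;
 *	while (!(bdp->lstatus & BD_LFLAG(RXBD_EMPTY))) {
 *		pass the skb for this descriptor up the stack;
 *		attach a freshly allocated skb via bdp->bufPtr;
 *		mark the descriptor empty again (keeping RXBD_WRAP on
 *		the last descriptor so hardware returns to the start);
 *		bdp = next descriptor, wrapping at the end of the ring;
 *	}
 */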

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

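	/*
	 * Make sure the bufPtr store above is visible to the controller
	 * before the descriptor is handed back via lstatus; eieio()
	 * orders the stores on PowerPC.
	 */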
	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
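
/*
 * Resulting layout of the single coherent allocation above (one region
 * shared by every ring, addresses increasing downward):
 *
 *	tx_queue[0] BDs ... tx_queue[num_tx_queues - 1] BDs
 *	rx_queue[0] BDs ... rx_queue[num_rx_queues - 1] BDs
 */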

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
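	/*
	 * The per-queue base registers appear to sit two u32 slots apart
	 * in the register block, hence the += 2 stride below.
	 */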
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
345
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000346static void gfar_init_mac(struct net_device *ndev)
347{
348 struct gfar_private *priv = netdev_priv(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000350 u32 rctrl = 0;
351 u32 tctrl = 0;
352 u32 attrs = 0;
353
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000354 /* write the tx/rx base registers */
355 gfar_init_tx_rx_base(priv);
Anton Vorontsov32c513b2009-10-12 06:00:36 +0000356
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000357 /* Configure the coalescing support */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000359
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000360 if (priv->rx_filer_enable) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000361 rctrl |= RCTRL_FILREN;
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000362 /* Program the RIR0 reg with the required distribution */
363 gfar_write(&regs->rir0, DEFAULT_RIR0);
364 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000365
366 if (priv->rx_csum_enable)
367 rctrl |= RCTRL_CHECKSUMMING;
368
369 if (priv->extended_hash) {
370 rctrl |= RCTRL_EXTHASH;
371
372 gfar_clear_exact_match(ndev);
373 rctrl |= RCTRL_EMEN;
374 }
375
376 if (priv->padding) {
377 rctrl &= ~RCTRL_PAL_MASK;
378 rctrl |= RCTRL_PADDING(priv->padding);
379 }
380
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000381 /* Insert receive time stamps into padding alignment bytes */
382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
383 rctrl &= ~RCTRL_PAL_MASK;
384 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
385 priv->padding = 8;
386 }
387
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000388 /* keep vlan related bits if it's enabled */
389 if (priv->vlgrp) {
390 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
391 tctrl |= TCTRL_VLINS;
392 }
393
394 /* Init rctrl based on our settings */
395 gfar_write(&regs->rctrl, rctrl);
396
397 if (ndev->features & NETIF_F_IP_CSUM)
398 tctrl |= TCTRL_INIT_CSUM;
399
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000400 tctrl |= TCTRL_TXSCHED_PRIO;
401
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000402 gfar_write(&regs->tctrl, tctrl);
403
404 /* Set the extraction length and index */
405 attrs = ATTRELI_EL(priv->rx_stash_size) |
406 ATTRELI_EI(priv->rx_stash_index);
407
408 gfar_write(&regs->attreli, attrs);
409
410 /* Start with defaults, and add stashing or locking
411 * depending on the approprate variables */
412 attrs = ATTR_INIT_SETTINGS;
413
414 if (priv->bd_stash_en)
415 attrs |= ATTR_BDSTASH;
416
417 if (priv->rx_stash_size != 0)
418 attrs |= ATTR_BUFSTASH;
419
420 gfar_write(&regs->attr, attrs);
421
422 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
423 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
424 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
425}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct netdev_queue *txq;
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		tx_bytes += txq->tx_bytes;
		tx_packets += txq->tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
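
/*
 * The FCB (Frame Control Block) is a small per-frame header the
 * controller prepends to received frames, carrying checksum, VLAN and,
 * on devices with a timer, timestamp information; GMAC_FCB_LEN bytes
 * are reserved for it in hard_header_len during probe below.
 */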

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

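/*
 * A "group" pairs one set of MMIO registers with one set of interrupts
 * and a bitmap of the rx/tx queues it serves.  Single-queue parts
 * (SQ_SG_MODE) have exactly one group; etsec2-class parts (MQ_MG_MODE)
 * describe one group per device-tree child node.
 */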
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
				priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
				priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
				sizeof(struct gfar_priv_tx_q), GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
				sizeof(struct gfar_priv_rx_q), GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx_en = 0;
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_rx_en = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
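
/*
 * Illustrative user-space counterpart of the handler above (a hedged
 * sketch, not driver code): enabling TX timestamping on "eth0" via the
 * standard SIOCSHWTSTAMP interface.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_NONE,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);	// sock_fd: any open socket
 */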

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
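
/*
 * Example: reverse_bitmap(0x80, 8) == 0x01 and reverse_bitmap(0x01, 8)
 * == 0x80 -- bit (max_qs - 1 - i) of the input becomes bit i of the
 * result, mirroring the bitmap within the low max_qs bits.
 */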

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
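
/*
 * Resulting table layout (a summary of the writes above): entry
 * MAX_FILER_IDX holds the default match-all rule, the entries just
 * below it hold a four-rule cluster per traffic class (IPv6/IPv4,
 * plain/UDP/TCP), and everything below cur_filer_idx is filled with
 * no-match placeholders.
 */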
918
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400919/* Set up the ethernet device structure, private data,
920 * and anything else we need before we start */
Andy Flemingb31a1d82008-12-16 15:29:15 -0800921static int gfar_probe(struct of_device *ofdev,
922 const struct of_device_id *match)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923{
924 u32 tempval;
925 struct net_device *dev = NULL;
926 struct gfar_private *priv = NULL;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000927 struct gfar __iomem *regs = NULL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000928 int err = 0, i, grp_idx = 0;
Dai Harukic50a5d92008-12-17 16:51:32 -0800929 int len_devname;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000930 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000931 u32 isrg = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +0000932 u32 __iomem *baddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000934 err = gfar_of_init(ofdev, &dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000936 if (err)
937 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
939 priv = netdev_priv(dev);
Kumar Gala48268572009-03-18 23:28:22 -0700940 priv->ndev = dev;
941 priv->ofdev = ofdev;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800942 priv->node = ofdev->node;
Kumar Gala48268572009-03-18 23:28:22 -0700943 SET_NETDEV_DEV(dev, &ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944
Scott Woodd87eb122008-07-11 18:04:45 -0500945 spin_lock_init(&priv->bflock);
Sebastian Siewiorab939902008-08-19 21:12:45 +0200946 INIT_WORK(&priv->reset_task, gfar_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947
Andy Flemingb31a1d82008-12-16 15:29:15 -0800948 dev_set_drvdata(&ofdev->dev, priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000949 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950
951 /* Stop the DMA engine now, in case it was running before */
952 /* (The firmware could have used it, and left it running). */
Andy Fleming257d9382008-12-16 15:25:45 -0800953 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954
955 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000956 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957
Andy Flemingb98ac702009-02-04 16:38:05 -0800958 /* We need to delay at least 3 TX clocks */
959 udelay(2);
960
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000962 gfar_write(&regs->maccfg1, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963
964 /* Initialize MACCFG2. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000965 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966
967 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000968 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 /* Set the dev->base_addr to the gfar reg region */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +0000971 dev->base_addr = (unsigned long) regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972
Andy Flemingb31a1d82008-12-16 15:29:15 -0800973 SET_NETDEV_DEV(dev, &ofdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
975 /* Fill in the dev structure */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976 dev->watchdog_timeo = TX_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 dev->mtu = 1500;
Andy Fleming26ccfc32009-03-10 12:58:28 +0000978 dev->netdev_ops = &gfar_netdev_ops;
Kumar Gala0bbaf062005-06-20 10:54:21 -0500979 dev->ethtool_ops = &gfar_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000981 /* Register for napi ...We are registering NAPI for each grp */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000982 for (i = 0; i < priv->num_grps; i++)
983 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000984
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0, but
	 * for_each_set_bit parses from right to left, which effectively
	 * reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Enable the filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}
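
	/* The names built above follow the pattern "<dev>_g<N>_tx",
	 * "<dev>_g<N>_rx" and "<dev>_g<N>_er" (i + 48 is ASCII '0' + i),
	 * e.g. "eth0_g0_tx" for group 0 of eth0; single-interrupt devices
	 * just use the bare device name.
	 */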

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

Scott Woodd87eb122008-07-11 18:04:45 -05001213#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001214
1215static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001216{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001217 struct gfar_private *priv = dev_get_drvdata(dev);
1218 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001219 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001220 unsigned long flags;
1221 u32 tempval;
1222
1223 int magic_packet = priv->wol_en &&
Andy Flemingb31a1d82008-12-16 15:29:15 -08001224 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001225
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001226 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001227
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001228 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001229
1230 local_irq_save(flags);
1231 lock_tx_qs(priv);
1232 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001233
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001234 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001235
1236 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001237 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001238
1239 tempval &= ~MACCFG1_TX_EN;
1240
1241 if (!magic_packet)
1242 tempval &= ~MACCFG1_RX_EN;
1243
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001244 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001245
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001246 unlock_rx_qs(priv);
1247 unlock_tx_qs(priv);
1248 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001249
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001250 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001251
1252 if (magic_packet) {
1253 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001254 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001255
1256 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001257 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001258 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001259 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001260 } else {
1261 phy_stop(priv->phydev);
1262 }
1263 }
1264
1265 return 0;
1266}
1267
static int gfar_resume(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        if (!netif_running(ndev)) {
                netif_device_attach(ndev);
                return 0;
        }

        if (!magic_packet && priv->phydev)
                phy_start(priv->phydev);

        /* Disable Magic Packet mode, in case something
         * else woke us up.
         */
        local_irq_save(flags);
        lock_tx_qs(priv);
        lock_rx_qs(priv);

        tempval = gfar_read(&regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
        gfar_write(&regs->maccfg2, tempval);

        gfar_start(ndev);

        unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);

        netif_device_attach(ndev);

        enable_napi(priv);

        return 0;
}

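/* Hibernation restore callback: unlike resume, the hardware may have lost
 * all of its state, so rebuild the buffer descriptor rings and reprogram
 * the MAC from scratch before restarting. */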
static int gfar_restore(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;

        if (!netif_running(ndev))
                return 0;

        gfar_init_bds(ndev);
        init_registers(ndev);
        gfar_set_mac_address(ndev);
        gfar_init_mac(ndev);
        gfar_start(ndev);

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        if (priv->phydev)
                phy_start(priv->phydev);

        netif_device_attach(ndev);
        enable_napi(priv);

        return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
        .suspend = gfar_suspend,
        .resume = gfar_resume,
        .freeze = gfar_suspend,
        .thaw = gfar_resume,
        .restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

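/* Legacy of_platform suspend/resume entry points; they simply forward to
 * the dev_pm_ops handlers above. */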
static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
        return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
        return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 ecntrl;

        ecntrl = gfar_read(&regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else {
                        phy_interface_t interface = priv->interface;

                        /*
                         * This isn't autodetected right now, so it must
                         * be set by the device tree or platform code.
                         */
                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                                return PHY_INTERFACE_MODE_RGMII_ID;

                        return PHY_INTERFACE_MODE_RGMII;
                }
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        phy_interface_t interface;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        interface = gfar_get_interface(dev);

        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
                                      interface);
        if (!priv->phydev)
                priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
                                                         interface);
        if (!priv->phydev) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);

        /* Remove any features not supported by the controller */
        priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        priv->phydev->advertising = priv->phydev->supported;

        return 0;
}

/*
 * Initialize the TBI PHY interface for communicating with the
 * SERDES Lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *tbiphy;

        if (!priv->tbi_node) {
                dev_warn(&dev->dev, "error: SGMII mode requires that the "
                         "device tree specify a tbi-handle\n");
                return;
        }

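        /* A device tree fragment providing the handle looks roughly like
         * this (a sketch only; node names and the TBI PHY address vary
         * by board):
         *
         *      tbi0: tbi-phy@11 {
         *              reg = <0x11>;
         *              device_type = "tbi-phy";
         *      };
         *
         *      ethernet@24000 {
         *              tbi-handle = <&tbi0>;
         *              ...
         *      };
         */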
        tbiphy = of_phy_find_device(priv->tbi_node);
        if (!tbiphy) {
                dev_err(&dev->dev, "error: Could not get TBI device\n");
                return;
        }

        /*
         * If the link is already up, we must already be ok, and don't need to
         * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
         * everything for us? Resetting it takes the link down and requires
         * several seconds for it to come back.
         */
        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
                return;

        /* Single clk mode, mii mode off (for serdes communication) */
        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

        phy_write(tbiphy, MII_ADVERTISE,
                  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                  ADVERTISE_1000XPSE_ASYM);

        phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
                  BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
        int i = 0;

        for (i = 0; i < priv->num_grps; i++) {
                regs = priv->gfargrp[i].regs;
                /* Clear IEVENT */
                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

                /* Initialize IMASK */
                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
        }

        regs = priv->gfargrp[0].regs;

        /* Init hash registers to zero */
        gfar_write(&regs->igaddr0, 0);
        gfar_write(&regs->igaddr1, 0);
        gfar_write(&regs->igaddr2, 0);
        gfar_write(&regs->igaddr3, 0);
        gfar_write(&regs->igaddr4, 0);
        gfar_write(&regs->igaddr5, 0);
        gfar_write(&regs->igaddr6, 0);
        gfar_write(&regs->igaddr7, 0);

        gfar_write(&regs->gaddr0, 0);
        gfar_write(&regs->gaddr1, 0);
        gfar_write(&regs->gaddr2, 0);
        gfar_write(&regs->gaddr3, 0);
        gfar_write(&regs->gaddr4, 0);
        gfar_write(&regs->gaddr5, 0);
        gfar_write(&regs->gaddr6, 0);
        gfar_write(&regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&regs->rmon.cam1, 0xffffffff);
                gfar_write(&regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

/* Halt the receive and transmit queues, without touching the
 * MACCFG1 Rx/Tx enables */
static void gfar_halt_nodisable(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
        u32 tempval;
        int i = 0;

        for (i = 0; i < priv->num_grps; i++) {
                regs = priv->gfargrp[i].regs;
                /* Mask all interrupts */
                gfar_write(&regs->imask, IMASK_INIT_CLEAR);

                /* Clear all interrupts */
                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
        }

        regs = priv->gfargrp[0].regs;

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&regs->dmactrl, tempval);

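                /* Poll IEVENT until the controller raises both graceful
                 * stop events (Rx and Tx). */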
                spin_event_timeout(((gfar_read(&regs->ievent) &
                                     (IEVENT_GRSC | IEVENT_GTSC)) ==
                                    (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
        }
}

/* Halt the receive and transmit queues, then disable Rx and Tx
 * in MACCFG1 */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;

        gfar_halt_nodisable(dev);

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

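/* Release the three per-group IRQ lines requested by register_grp_irqs() */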
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
        free_irq(grp->interruptError, grp);
        free_irq(grp->interruptTransmit, grp);
        free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        int i;

        phy_stop(priv->phydev);

        /* Lock it down */
        local_irq_save(flags);
        lock_tx_qs(priv);
        lock_rx_qs(priv);

        gfar_halt(dev);

        unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);

        /* Free the IRQs */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                for (i = 0; i < priv->num_grps; i++)
                        free_grp_irqs(&priv->gfargrp[i]);
        } else {
                for (i = 0; i < priv->num_grps; i++)
                        free_irq(priv->gfargrp[i].interruptTransmit,
                                 &priv->gfargrp[i]);
        }

        free_skb_resources(priv);
}

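/* Unmap and free any tx skbs still sitting on a tx queue's BD ring */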
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
        struct txbd8 *txbdp;
        struct gfar_private *priv = netdev_priv(tx_queue->dev);
        int i, j;

        txbdp = tx_queue->tx_bd_base;

        for (i = 0; i < tx_queue->tx_ring_size; i++) {
                if (!tx_queue->tx_skbuff[i])
                        continue;

                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
                                 txbdp->length, DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
                     j++) {
                        txbdp++;
                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
                                       txbdp->length, DMA_TO_DEVICE);
                }
                txbdp++;
                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
                tx_queue->tx_skbuff[i] = NULL;
        }
        kfree(tx_queue->tx_skbuff);
}

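/* Unmap and free any rx skbs still attached to a rx queue's BD ring */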
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
        struct rxbd8 *rxbdp;
        struct gfar_private *priv = netdev_priv(rx_queue->dev);
        int i;

        rxbdp = rx_queue->rx_bd_base;

        for (i = 0; i < rx_queue->rx_ring_size; i++) {
                if (rx_queue->rx_skbuff[i]) {
                        dma_unmap_single(&priv->ofdev->dev,
                                         rxbdp->bufPtr, priv->rx_buffer_size,
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
                        rx_queue->rx_skbuff[i] = NULL;
                }
                rxbdp->lstatus = 0;
                rxbdp->bufPtr = 0;
                rxbdp++;
        }
        kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff.
 */
static void free_skb_resources(struct gfar_private *priv)
{
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                if (tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                if (rx_queue->rx_skbuff)
                        free_skb_rx_queue(rx_queue);
        }

        dma_free_coherent(&priv->ofdev->dev,
                          sizeof(struct txbd8) * priv->total_tx_ring_size +
                          sizeof(struct rxbd8) * priv->total_rx_ring_size,
                          priv->tx_queue[0]->tx_bd_base,
                          priv->tx_queue[0]->tx_bd_dma_base);
}

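/* Re-enable the Rx/Tx MAC, restart DMA polling, and unmask interrupts */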
void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        int i = 0;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&regs->dmactrl, tempval);

        for (i = 0; i < priv->num_grps; i++) {
                regs = priv->gfargrp[i].regs;
                /* Clear THLT/RHLT, so that the DMA starts polling now */
                gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
                gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
                /* Unmask the interrupts we look for */
                gfar_write(&regs->imask, IMASK_DEFAULT);
        }

        dev->trans_start = jiffies; /* prevent tx timeout */
}

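/* Program the interrupt-coalescing registers; tx_mask and rx_mask select
 * which queues to configure when running in multi-group mode. */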
void gfar_configure_coalescing(struct gfar_private *priv,
                unsigned long tx_mask, unsigned long rx_mask)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i = 0;

        /* Backward compatible case -- even if we enable
         * multiple queues, there's only a single reg to program
         */
        gfar_write(&regs->txic, 0);
        if (likely(priv->tx_queue[0]->txcoalescing))
                gfar_write(&regs->txic, priv->tx_queue[0]->txic);

        gfar_write(&regs->rxic, 0);
        if (unlikely(priv->rx_queue[0]->rxcoalescing))
                gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                        if (likely(priv->tx_queue[i]->txcoalescing)) {
                                gfar_write(baddr + i, 0);
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
                        }
                }

                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                        if (likely(priv->rx_queue[i]->rxcoalescing)) {
                                gfar_write(baddr + i, 0);
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
                        }
                }
        }
}

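/* Request the IRQ lines for one interrupt group: either separate
 * Error/Transmit/Receive lines, or a single shared line. */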
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
        struct gfar_private *priv = grp->priv;
        struct net_device *dev = priv->ndev;
        int err;

        /* If the device has multiple interrupts, register for
         * them. Otherwise, only register for the one */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if ((err = request_irq(grp->interruptError, gfar_error, 0,
                                       grp->int_name_er, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, grp->interruptError);

                        goto err_irq_fail;
                }

                if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
                                       0, grp->int_name_tx, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, grp->interruptTransmit);
                        goto tx_irq_fail;
                }

                if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
                                       grp->int_name_rx, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, grp->interruptReceive);
                        goto rx_irq_fail;
                }
        } else {
                if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
                                       0, grp->int_name_tx, grp)) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, grp->interruptTransmit);
                        goto err_irq_fail;
                }
        }

        return 0;

rx_irq_fail:
        free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
        free_irq(grp->interruptError, grp);
err_irq_fail:
        return err;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = NULL;
        int err, i, j;

        for (i = 0; i < priv->num_grps; i++) {
                regs = priv->gfargrp[i].regs;
                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
        }

        regs = priv->gfargrp[0].regs;
        err = gfar_alloc_skb_resources(ndev);
        if (err)
                return err;

        gfar_init_mac(ndev);

        for (i = 0; i < priv->num_grps; i++) {
                err = register_grp_irqs(&priv->gfargrp[i]);
                if (err) {
                        for (j = 0; j < i; j++)
                                free_grp_irqs(&priv->gfargrp[j]);
                        goto irq_fail;
                }
        }

        /* Start the controller */
        gfar_start(ndev);

        phy_start(priv->phydev);

        gfar_configure_coalescing(priv, 0xFF, 0xFF);

        return 0;

irq_fail:
        free_skb_resources(priv);
        return err;
}

/* Called when something needs to use the ethernet device.
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        int err;

        enable_napi(priv);

        skb_queue_head_init(&priv->rx_recycle);

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);
        if (err) {
                disable_napi(priv);
                return err;
        }

        err = startup_gfar(dev);
        if (err) {
                disable_napi(priv);
                return err;
        }

        netif_tx_start_all_queues(dev);

        device_set_wakeup_enable(&dev->dev, priv->wol_en);

        return err;
}

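/* Prepend a zeroed frame control block (FCB) to the skb and return it */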
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload. We set it to checksum, using a pseudo-header
         * we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is,
         * and provide the already-calculated phcs */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = udp_hdr(skb)->check;
        } else
                fcb->phcs = tcp_hdr(skb)->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
        fcb->l4os = skb_network_header_len(skb);

        fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}

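/* Advance a TxBD pointer by 'stride' descriptors, wrapping at the end
 * of the ring */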
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
                struct txbd8 *base, int ring_size)
{
        struct txbd8 *new_bd = bdp + stride;

        return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
                int ring_size)
{
        return skip_txbd(bdp, 1, base, ring_size);
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer.
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct netdev_queue *txq;
        struct gfar __iomem *regs = NULL;
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
        u32 lstatus;
        int i, rq = 0, do_tstamp = 0;
        u32 bufaddr;
        unsigned long flags;
        unsigned int nr_frags, nr_txbds, length;
        union skb_shared_tx *shtx;

        rq = skb->queue_mapping;
        tx_queue = priv->tx_queue[rq];
        txq = netdev_get_tx_queue(dev, rq);
        base = tx_queue->tx_bd_base;
        regs = tx_queue->grp->regs;
        shtx = skb_tx(skb);

        /* check if time stamp should be generated */
        if (unlikely(shtx->hardware && priv->hwts_tx_en))
                do_tstamp = 1;

        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
                        (priv->vlgrp && vlan_tx_tag_present(skb)) ||
                        unlikely(do_tstamp)) &&
                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
                struct sk_buff *skb_new;

                skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
                if (!skb_new) {
                        dev->stats.tx_errors++;
                        kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
                kfree_skb(skb);
                skb = skb_new;
        }

        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;

        /* calculate the required number of TxBDs for this skb */
        if (unlikely(do_tstamp))
                nr_txbds = nr_frags + 2;
        else
                nr_txbds = nr_frags + 1;

        /* check if there is space to queue this packet */
        if (nr_txbds > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
                netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
                return NETDEV_TX_BUSY;
        }

        /* Update transmit stats */
        txq->tx_bytes += skb->len;
        txq->tx_packets++;

        txbdp = txbdp_start = tx_queue->cur_tx;
        lstatus = txbdp->lstatus;

        /* Time stamp insertion requires one additional TxBD */
        if (unlikely(do_tstamp))
                txbdp_tstamp = txbdp = next_txbd(txbdp, base,
                                tx_queue->tx_ring_size);

        if (nr_frags == 0) {
                if (unlikely(do_tstamp))
                        txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
                                        TXBD_INTERRUPT);
                else
                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
                        /* Point at the next BD, wrapping as needed */
                        txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

                        length = skb_shinfo(skb)->frags[i].size;

                        lstatus = txbdp->lstatus | length |
                                BD_LFLAG(TXBD_READY);

                        /* Handle the last BD specially */
                        if (i == nr_frags - 1)
                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

                        bufaddr = dma_map_page(&priv->ofdev->dev,
                                        skb_shinfo(skb)->frags[i].page,
                                        skb_shinfo(skb)->frags[i].page_offset,
                                        length,
                                        DMA_TO_DEVICE);

                        /* set the TxBD length and buffer pointer */
                        txbdp->bufPtr = bufaddr;
                        txbdp->lstatus = lstatus;
                }

                lstatus = txbdp_start->lstatus;
        }

        /* Set up checksumming */
        if (CHECKSUM_PARTIAL == skb->ip_summed) {
                fcb = gfar_add_fcb(skb);
                lstatus |= BD_LFLAG(TXBD_TOE);
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlgrp && vlan_tx_tag_present(skb)) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb);
                        lstatus |= BD_LFLAG(TXBD_TOE);
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* Setup tx hardware time stamping if requested */
        if (unlikely(do_tstamp)) {
                shtx->in_progress = 1;
                if (fcb == NULL)
                        fcb = gfar_add_fcb(skb);
                fcb->ptp = 1;
                lstatus |= BD_LFLAG(TXBD_TOE);
        }

        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);

        /*
         * If time stamping is requested, one additional TxBD must be set up.
         * The first TxBD points to the FCB and must have a data length of
         * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
         * the full frame length.
         */
        if (unlikely(do_tstamp)) {
                txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
                txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
                                (skb_headlen(skb) - GMAC_FCB_LEN);
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
        } else {
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
        }

        /*
         * We can work in parallel with gfar_clean_tx_ring(), except
         * when modifying num_txbdfree. Note that we didn't grab the lock
         * when we were reading the num_txbdfree and checking for available
         * space; that's because outside of this function it can only grow,
         * and once we've got the needed space, it cannot suddenly disappear.
         *
         * The lock also protects us from gfar_error(), which can modify
         * regs->tstat and thus retrigger the transfers, which is why we
         * also must grab the lock before setting the ready bit for the
         * first BD to be transmitted.
         */
        spin_lock_irqsave(&tx_queue->txlock, flags);

        /*
         * The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
         * don't need), thus requiring a more expensive sync instruction. At
         * some point, the set of architecture-independent barrier functions
         * should be expanded to include weaker barriers.
         */
        eieio();

        txbdp_start->lstatus = lstatus;

        eieio(); /* force lstatus write before tx_skbuff */

        tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

        /* Update the current skb pointer to the next entry we will use
         * (wrapping if necessary) */
        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
                TX_RING_MOD_MASK(tx_queue->tx_ring_size);

        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

        /* reduce TxBD free count */
        tx_queue->num_txbdfree -= (nr_txbds);

        /* If the next BD still needs to be cleaned up, then the BDs
         * are full. We need to tell the kernel to stop sending us stuff. */
        if (!tx_queue->num_txbdfree) {
                netif_tx_stop_queue(txq);

                dev->stats.tx_fifo_errors++;
        }

        /* Tell the DMA to go go go */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

        /* Unlock priv */
        spin_unlock_irqrestore(&tx_queue->txlock, flags);

        return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        disable_napi(priv);

        skb_queue_purge(&priv->rx_recycle);
        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_tx_stop_all_queues(dev);

        return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
        unsigned long flags;
        u32 tempval;

        regs = priv->gfargrp[0].regs;
        local_irq_save(flags);
        lock_rx_qs(priv);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&regs->rctrl);
                tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
                gfar_write(&regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                /* If parse is no longer required, then disable parser */
                if (tempval & RCTRL_REQ_PARSER)
                        tempval |= RCTRL_PRSDEP_INIT;
                else
                        tempval &= ~RCTRL_PRSDEP_INIT;
                gfar_write(&regs->rctrl, tempval);
        }

        gfar_change_mtu(dev, dev->mtu);

        unlock_rx_qs(priv);
        local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlgrp)
                frame_size += VLAN_HLEN;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                               dev->name);
                return -EINVAL;
        }

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;

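        /* Worked example (assuming INCREMENTAL_BUFFER_SIZE is 512, per its
         * definition in gianfar.h): new_mtu = 1500 with no VLAN, FCB, or
         * padding gives frame_size = 1514, which rounds up to a 1536-byte
         * buffer. */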
        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something */
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                stop_gfar(dev);

        priv->rx_buffer_size = tempsize;

        dev->mtu = new_mtu;

        gfar_write(&regs->mrblr, priv->rx_buffer_size);
        gfar_write(&regs->maxfrm, priv->rx_buffer_size);

        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
        tempval = gfar_read(&regs->maccfg2);

        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

        gfar_write(&regs->maccfg2, tempval);

        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);

        return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
        struct gfar_private *priv = container_of(work, struct gfar_private,
                        reset_task);
        struct net_device *dev = priv->ndev;

        if (dev->flags & IFF_UP) {
                netif_tx_stop_all_queues(dev);
                stop_gfar(dev);
                startup_gfar(dev);
                netif_tx_start_all_queues(dev);
        }

        netif_tx_schedule_all(dev);
}

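/* The kernel calls this from the tx watchdog; defer the actual reset to
 * process context via the reset_task work item above. */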
static void gfar_timeout(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        dev->stats.tx_errors++;
        schedule_work(&priv->reset_task);
}

/* Reclaim completed transmit descriptors, recycling or freeing their skbs.
 * Returns how many frames were cleaned. */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
        struct net_device *dev = tx_queue->dev;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *bdp, *next = NULL;
        struct txbd8 *lbdp = NULL;
        struct txbd8 *base = tx_queue->tx_bd_base;
        struct sk_buff *skb;
        int skb_dirtytx;
        int tx_ring_size = tx_queue->tx_ring_size;
        int frags = 0, nr_txbds = 0;
        int i;
        int howmany = 0;
        u32 lstatus;
        size_t buflen;
        union skb_shared_tx *shtx;

        rx_queue = priv->rx_queue[tx_queue->qindex];
        bdp = tx_queue->dirty_tx;
        skb_dirtytx = tx_queue->skb_dirtytx;

        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
                unsigned long flags;

                frags = skb_shinfo(skb)->nr_frags;

                /*
                 * When time stamping, one additional TxBD must be freed.
                 * Also, we need to dma_unmap_single() the TxPAL.
                 */
                shtx = skb_tx(skb);
                if (unlikely(shtx->in_progress))
                        nr_txbds = frags + 2;
                else
                        nr_txbds = frags + 1;

                lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

                lstatus = lbdp->lstatus;

                /* Only clean completed frames */
                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
                                (lstatus & BD_LENGTH_MASK))
                        break;

                if (unlikely(shtx->in_progress)) {
                        next = next_txbd(bdp, base, tx_ring_size);
                        buflen = next->length + GMAC_FCB_LEN;
                } else
                        buflen = bdp->length;

                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
                                buflen, DMA_TO_DEVICE);

                if (unlikely(shtx->in_progress)) {
                        struct skb_shared_hwtstamps shhwtstamps;
                        u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
                        skb_tstamp_tx(skb, &shhwtstamps);
                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                        bdp = next;
                }

                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                bdp = next_txbd(bdp, base, tx_ring_size);

                for (i = 0; i < frags; i++) {
                        dma_unmap_page(&priv->ofdev->dev,
                                        bdp->bufPtr,
                                        bdp->length,
                                        DMA_TO_DEVICE);
                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
                        bdp = next_txbd(bdp, base, tx_ring_size);
                }

                /*
                 * If there's room in the queue (limit it to rx_buffer_size)
                 * we add this skb back into the pool, if it's the right size
                 */
                if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
                                skb_recycle_check(skb, priv->rx_buffer_size +
                                        RXBUF_ALIGNMENT))
                        __skb_queue_head(&priv->rx_recycle, skb);
                else
                        dev_kfree_skb_any(skb);

                tx_queue->tx_skbuff[skb_dirtytx] = NULL;

                skb_dirtytx = (skb_dirtytx + 1) &
                        TX_RING_MOD_MASK(tx_ring_size);

                howmany++;
                spin_lock_irqsave(&tx_queue->txlock, flags);
                tx_queue->num_txbdfree += nr_txbds;
                spin_unlock_irqrestore(&tx_queue->txlock, flags);
        }

        /* If we freed a buffer, we can restart transmission, if necessary */
        if (__netif_subqueue_stopped(dev, tx_queue->qindex) &&
                        tx_queue->num_txbdfree)
                netif_wake_subqueue(dev, tx_queue->qindex);

        /* Update dirty indicators */
        tx_queue->skb_dirtytx = skb_dirtytx;
        tx_queue->dirty_tx = bdp;

        return howmany;
}

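/* Hand rx/tx cleanup off to NAPI: mask the RTX interrupts while a poll is
 * being scheduled, or just ack the events if a poll is already pending. */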
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
        unsigned long flags;

        spin_lock_irqsave(&gfargrp->grplock, flags);
        if (napi_schedule_prep(&gfargrp->napi)) {
                gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
                __napi_schedule(&gfargrp->napi);
        } else {
                /*
                 * Clear IEVENT, so interrupts aren't called again
                 * because of the packets that have already arrived.
                 */
                gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
        }
        spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

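/*
 * RX skbs come from the recycle pool when possible, otherwise from a fresh
 * allocation padded by RXBUF_ALIGNMENT so the data pointer can be aligned.
 * Worked example, assuming RXBUF_ALIGNMENT is 64: data at ...0x1010 yields
 * alignamount 0x30, and skb_reserve(skb, 0x30) moves data to ...0x1040.
 */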
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* Reserve just enough bytes to align the data buffer on an
	 * RXBUF_ALIGNMENT boundary
	 */
	skb_reserve(skb, alignamount);
	GFAR_CB(skb)->alignamount = alignamount;

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;
		estats->rx_trunc++;
		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

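/*
 * RX checksum offload: the controller reports what it checked in the RX
 * frame control block.  RXFCB_CIP and RXFCB_CTU mean the IP and TCP/UDP
 * checksums were handled in hardware; comparing against RXFCB_CSUM_MASK
 * also insists the corresponding error bits are clear before we claim
 * CHECKSUM_UNNECESSARY.
 */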
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found and the checksums verified, tell the
	 * kernel no further checksumming is necessary; otherwise leave it
	 * to the stack */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;
	int ret;

	/* The FCB, if present, sits at the very beginning of the buffer */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB and any padded bytes from the skb */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

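	/*
	 * With RX timestamping enabled, the controller prepends the raw
	 * 64-bit nanosecond timestamp to the frame; after the FCB pull it
	 * is the first 8 bytes at skb->data, and the subsequent padding
	 * pull steps past it to the Ethernet header.
	 */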
	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * Un-reserve the skb back to its state
				 * before gfar_new_skb() aligned it to an
				 * RXBUF_ALIGNMENT boundary, then put it
				 * on the recycle list.
				 */
				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget / num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

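	/*
	 * Distribute the NAPI budget evenly over this group's RX queues.
	 * Queues that come in under their share are marked serviced, and
	 * their unused budget is redistributed over the remaining queues
	 * on the next pass, so a busy queue can consume slack left by an
	 * idle one.
	 */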
	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget / num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
					budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags changes)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index indicate which gaddr register to use, and the 5 other
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
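/*
 * Worked example (illustrative values, with hash_width == 8): if the CRC
 * result's top byte is 0xa7 (1010 0111b), then whichreg = 101b = 5 and
 * whichbit = 00111b = 7, so the address hashes to bit 7 (MSB-first
 * numbering) of gaddr5, i.e. we OR in (1 << (31 - 7)).
 */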
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly */
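	/*
	 * Illustrative example with a made-up address: for 00:04:9f:01:02:03,
	 * tmpbuf becomes 03:02:01:9f:04:00, so macstnaddr1 takes the last
	 * four octets reversed and macstnaddr2's upper half takes the first
	 * two; presumably only that upper half of the second register is
	 * significant for the station address.
	 */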
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Report the event if anyone is listening */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

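/*
 * OF device-tree match table.  "gianfar" covers the classic single-group
 * eTSEC nodes, while "fsl,etsec2" covers the newer multi-group eTSEC 2.0
 * controllers.  MODULE_DEVICE_TABLE() below exports these IDs so the
 * module can be auto-loaded from device-tree matches.
 */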
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_legacy_suspend,
	.resume = gfar_legacy_resume,
	.driver.pm = GFAR_PM_OPS,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);