/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

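/* (Re)initialize a single rx buffer descriptor: point it at its data
 * buffer, mark it empty and interrupting, and set the wrap flag if it
 * is the last descriptor of the ring. The eieio() orders the buffer
 * pointer update before the descriptor is handed back to hardware.
 */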
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

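/* Bring every tx/rx descriptor ring back to its initial state; rx
 * descriptors that do not already own an skb get a fresh one allocated.
 */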
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;
}

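/* Allocate one DMA-coherent region for all tx and rx descriptor rings,
 * carve it up between the queues, and allocate the per-ring skb pointer
 * arrays before initializing the rings via gfar_init_bds().
 */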
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   sizeof(struct txbd8) * priv->total_tx_ring_size +
				   sizeof(struct rxbd8) * priv->total_rx_ring_size,
				   &addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
					      tx_queue->tx_ring_size,
					      GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
					      rx_queue->rx_ring_size,
					      GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

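/* Program the bus address of every descriptor ring into the
 * controller's tbase/rbase registers (consecutive ring registers are
 * 8 bytes apart, hence baddr += 2).
 */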
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

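/* Program the MAC-level control registers (RCTRL, TCTRL, ATTR and the
 * tx FIFO thresholds) from the current device settings and features.
 */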
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

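/* Sum the per-queue counters into the aggregate netdev statistics */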
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

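/* Returns true if hardware VLAN extraction or insertion is enabled */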
static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

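/* Map the register region and interrupts of one interrupt group from
 * the device tree, and record which rx/tx queues the group serves.
 */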
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	grp->interruptTransmit = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		grp->interruptReceive = irq_of_parse_and_map(np, 1);
		grp->interruptError = irq_of_parse_and_map(np, 2);
		if (grp->interruptTransmit == NO_IRQ ||
		    grp->interruptReceive == NO_IRQ ||
		    grp->interruptError == NO_IRQ)
			return -EINVAL;
	}

	grp->grp_id = priv->num_grps;
	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

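/* Parse the device tree node: allocate the net_device, read the queue
 * counts, group, stashing and MAC address properties, and set the
 * capability flags the rest of the driver keys off.
 */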
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_PADDING |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

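/* SIOCSHWTSTAMP handler: enable or disable hardware time stamping.
 * Toggling rx time stamping restarts the device so that the rx control
 * registers are reprogrammed (see gfar_init_mac()).
 */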
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

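/* Mirror the lowest max_qs bits of bit_map, so that queue 0, carried
 * in the MSB by convention, ends up in bit 0 (e.g. 0b0001 with
 * max_qs = 4 becomes 0b1000).
 */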
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

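/* Write one four-entry filer cluster matching the given protocol
 * class, filling table slots downwards from rqfar; returns the index
 * of the lowest entry written.
 */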
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

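/* Populate the rx filer table with a default match-all rule, one
 * cluster per IPv4/IPv6 TCP/UDP class, and no-match rules for all
 * remaining entries.
 */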
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

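/* Identify the SoC from the PVR/SVR registers and record which
 * hardware errata workarounds apply to it.
 */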
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register NAPI for each interrupt group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers
	 */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map =
			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map =
			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;

		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(priv->gfargrp[i].int_name_tx, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242
David S. Millerd9d8e042009-09-06 01:41:02 -07001243 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001244 unmap_group_regs(priv);
Kumar Gala48268572009-03-18 23:28:22 -07001245 free_netdev(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
1247 return 0;
1248}
1249
Scott Woodd87eb122008-07-11 18:04:45 -05001250#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001251
1252static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001253{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001254 struct gfar_private *priv = dev_get_drvdata(dev);
1255 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001256 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001257 unsigned long flags;
1258 u32 tempval;
1259
1260 int magic_packet = priv->wol_en &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001261 (priv->device_flags &
1262 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001263
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001264 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001265
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001266 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001267
1268 local_irq_save(flags);
1269 lock_tx_qs(priv);
1270 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001271
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001272 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001273
1274 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001275 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001276
1277 tempval &= ~MACCFG1_TX_EN;
1278
1279 if (!magic_packet)
1280 tempval &= ~MACCFG1_RX_EN;
1281
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001282 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001283
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001284 unlock_rx_qs(priv);
1285 unlock_tx_qs(priv);
1286 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001287
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001288 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001289
1290 if (magic_packet) {
1291 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001292 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001293
1294 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001295 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001296 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001297 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001298 } else {
1299 phy_stop(priv->phydev);
1300 }
1301 }
1302
1303 return 0;
1304}
1305
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001306static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001307{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001308 struct gfar_private *priv = dev_get_drvdata(dev);
1309 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001310 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001311 unsigned long flags;
1312 u32 tempval;
1313 int magic_packet = priv->wol_en &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001314 (priv->device_flags &
1315 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001316
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001317 if (!netif_running(ndev)) {
1318 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001319 return 0;
1320 }
1321
1322 if (!magic_packet && priv->phydev)
1323 phy_start(priv->phydev);
1324
1325 /* Disable Magic Packet mode, in case something
1326 * else woke us up.
1327 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001328 local_irq_save(flags);
1329 lock_tx_qs(priv);
1330 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001331
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001332 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001333 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001334 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001335
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001336 gfar_start(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001337
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001338 unlock_rx_qs(priv);
1339 unlock_tx_qs(priv);
1340 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001341
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001342 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001343
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001344 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001345
1346 return 0;
1347}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001348
1349static int gfar_restore(struct device *dev)
1350{
1351 struct gfar_private *priv = dev_get_drvdata(dev);
1352 struct net_device *ndev = priv->ndev;
1353
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001354 if (!netif_running(ndev)) {
1355 netif_device_attach(ndev);
1356
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001357 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001358 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001359
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001360 if (gfar_init_bds(ndev)) {
1361 free_skb_resources(priv);
1362 return -ENOMEM;
1363 }
1364
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001365 init_registers(ndev);
1366 gfar_set_mac_address(ndev);
1367 gfar_init_mac(ndev);
1368 gfar_start(ndev);
1369
1370 priv->oldlink = 0;
1371 priv->oldspeed = 0;
1372 priv->oldduplex = -1;
1373
1374 if (priv->phydev)
1375 phy_start(priv->phydev);
1376
1377 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001378 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001379
1380 return 0;
1381}
1382
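/* suspend/resume double as the hibernation freeze/thaw hooks, while
 * restore reinitializes the hardware from scratch since register state
 * is not preserved across hibernation.
 */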
1383static struct dev_pm_ops gfar_pm_ops = {
1384 .suspend = gfar_suspend,
1385 .resume = gfar_resume,
1386 .freeze = gfar_suspend,
1387 .thaw = gfar_resume,
1388 .restore = gfar_restore,
1389};
1390
1391#define GFAR_PM_OPS (&gfar_pm_ops)
1392
Scott Woodd87eb122008-07-11 18:04:45 -05001393#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001394
1395#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001396
Scott Woodd87eb122008-07-11 18:04:45 -05001397#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001399/* Reads the controller's registers to determine what interface
1400 * connects it to the PHY.
1401 */
1402static phy_interface_t gfar_get_interface(struct net_device *dev)
1403{
1404 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001405 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001406 u32 ecntrl;
1407
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001408 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001409
1410 if (ecntrl & ECNTRL_SGMII_MODE)
1411 return PHY_INTERFACE_MODE_SGMII;
1412
1413 if (ecntrl & ECNTRL_TBI_MODE) {
1414 if (ecntrl & ECNTRL_REDUCED_MODE)
1415 return PHY_INTERFACE_MODE_RTBI;
1416 else
1417 return PHY_INTERFACE_MODE_TBI;
1418 }
1419
1420 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001421 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001422 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001423 }
Andy Fleming7132ab72007-07-11 11:43:07 -05001424 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001425 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001426
Jan Ceuleers0977f812012-06-05 03:42:12 +00001427 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001428 * be set by the device tree or platform code.
1429 */
1430 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1431 return PHY_INTERFACE_MODE_RGMII_ID;
1432
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001433 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001434 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001435 }
1436
Andy Flemingb31a1d82008-12-16 15:29:15 -08001437 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001438 return PHY_INTERFACE_MODE_GMII;
1439
1440 return PHY_INTERFACE_MODE_MII;
1441}
1442
1443
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001444/* Initializes driver's PHY state, and attaches to the PHY.
1445 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 */
1447static int init_phy(struct net_device *dev)
1448{
1449 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001450 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001451 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001452 SUPPORTED_1000baseT_Full : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001453 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 priv->oldlink = 0;
1456 priv->oldspeed = 0;
1457 priv->oldduplex = -1;
1458
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001459 interface = gfar_get_interface(dev);
1460
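	/* Prefer the PHY described by the device tree; if no phy-handle
	 * was provided, fall back to a fixed link.
	 */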
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001461 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1462 interface);
1463 if (!priv->phydev)
1464 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1465 interface);
1466 if (!priv->phydev) {
1467 dev_err(&dev->dev, "could not attach to PHY\n");
1468 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001469 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470
Kapil Junejad3c12872007-05-11 18:25:11 -05001471 if (interface == PHY_INTERFACE_MODE_SGMII)
1472 gfar_configure_serdes(dev);
1473
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001474 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001475 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1476 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479}
1480
Jan Ceuleers0977f812012-06-05 03:42:12 +00001481/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001482 * SERDES lynx PHY on the chip. We communicate with this PHY
1483 * through the MDIO bus on each controller, treating it as a
1484 * "normal" PHY at the address found in the TBIPA register. We assume
1485 * that the TBIPA register is valid. Either the MDIO bus code will set
1486 * it to a value that doesn't conflict with other PHYs on the bus, or the
1487 * value doesn't matter, as there are no other PHYs on the bus.
1488 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001489static void gfar_configure_serdes(struct net_device *dev)
1490{
1491 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001492 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001493
Grant Likelyfe192a42009-04-25 12:53:12 +00001494 if (!priv->tbi_node) {
1495 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1496 "device tree specify a tbi-handle\n");
1497 return;
1498 }
1499
1500 tbiphy = of_phy_find_device(priv->tbi_node);
1501 if (!tbiphy) {
1502 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001503 return;
1504 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001505
Jan Ceuleers0977f812012-06-05 03:42:12 +00001506 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001507 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1508 * everything for us? Resetting it takes the link down and requires
1509 * several seconds for it to come back.
1510 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001511 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001512 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001513
Paul Gortmakerd0313582008-04-17 00:08:10 -04001514	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001515 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001516
Grant Likelyfe192a42009-04-25 12:53:12 +00001517 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001518 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1519 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001520
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001521 phy_write(tbiphy, MII_BMCR,
1522 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1523 BMCR_SPEED1000);
Kapil Junejad3c12872007-05-11 18:25:11 -05001524}
1525
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526static void init_registers(struct net_device *dev)
1527{
1528 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001529 struct gfar __iomem *regs = NULL;
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00001530 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001532 for (i = 0; i < priv->num_grps; i++) {
1533 regs = priv->gfargrp[i].regs;
1534 /* Clear IEVENT */
1535 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001537 /* Initialize IMASK */
1538 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1539 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001541 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001543 gfar_write(&regs->igaddr0, 0);
1544 gfar_write(&regs->igaddr1, 0);
1545 gfar_write(&regs->igaddr2, 0);
1546 gfar_write(&regs->igaddr3, 0);
1547 gfar_write(&regs->igaddr4, 0);
1548 gfar_write(&regs->igaddr5, 0);
1549 gfar_write(&regs->igaddr6, 0);
1550 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001552 gfar_write(&regs->gaddr0, 0);
1553 gfar_write(&regs->gaddr1, 0);
1554 gfar_write(&regs->gaddr2, 0);
1555 gfar_write(&regs->gaddr3, 0);
1556 gfar_write(&regs->gaddr4, 0);
1557 gfar_write(&regs->gaddr5, 0);
1558 gfar_write(&regs->gaddr6, 0);
1559 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001562 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001563		memset_io(&regs->rmon, 0, sizeof(struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
1565 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001566 gfar_write(&regs->rmon.cam1, 0xffffffff);
1567 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 }
1569
1570 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001571 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001574 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
1576
Anton Vorontsov511d9342010-06-30 06:39:15 +00001577static int __gfar_is_rx_idle(struct gfar_private *priv)
1578{
1579 u32 res;
1580
Jan Ceuleers0977f812012-06-05 03:42:12 +00001581	/* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001582 * actually wait for IEVENT_GRSC flag.
1583 */
1584 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1585 return 0;
1586
Jan Ceuleers0977f812012-06-05 03:42:12 +00001587 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001588 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1589 * and the Rx can be safely reset.
1590 */
1591 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1592 res &= 0x7f807f80;
1593 if ((res & 0xffff) == (res >> 16))
1594 return 1;
1595
1596 return 0;
1597}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001598
1599/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001600static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601{
1602 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001603 struct gfar __iomem *regs = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 u32 tempval;
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00001605 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001607 for (i = 0; i < priv->num_grps; i++) {
1608 regs = priv->gfargrp[i].regs;
1609 /* Mask all interrupts */
1610 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001612 /* Clear all interrupts */
1613 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1614 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001616 regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001618 tempval = gfar_read(&regs->dmactrl);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001619 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1620 (DMACTRL_GRS | DMACTRL_GTS)) {
Anton Vorontsov511d9342010-06-30 06:39:15 +00001621 int ret;
1622
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001624 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
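		/* Poll IEVENT until both graceful-stop events are seen; on
		 * GFAR_ERRATA_A002 parts GRSC may never assert, so fall back
		 * to the Rx idle heuristic above.
		 */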
Anton Vorontsov511d9342010-06-30 06:39:15 +00001626 do {
1627 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1628 (IEVENT_GRSC | IEVENT_GTSC)) ==
1629 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1630 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1631 ret = __gfar_is_rx_idle(priv);
1632 } while (!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 }
Scott Woodd87eb122008-07-11 18:04:45 -05001634}
Scott Woodd87eb122008-07-11 18:04:45 -05001635
1636/* Halt the receive and transmit queues, then disable Rx and Tx */
1637void gfar_halt(struct net_device *dev)
1638{
1639 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001640 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001641 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
Scott Wood2a54adc2008-08-12 15:10:46 -05001643 gfar_halt_nodisable(dev);
1644
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 /* Disable Rx and Tx */
1646 tempval = gfar_read(&regs->maccfg1);
1647 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1648 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001649}
1650
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001651static void free_grp_irqs(struct gfar_priv_grp *grp)
1652{
1653 free_irq(grp->interruptError, grp);
1654 free_irq(grp->interruptTransmit, grp);
1655 free_irq(grp->interruptReceive, grp);
1656}
1657
Kumar Gala0bbaf062005-06-20 10:54:21 -05001658void stop_gfar(struct net_device *dev)
1659{
1660 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001661 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001662 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001663
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001664 phy_stop(priv->phydev);
1665
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001666
Kumar Gala0bbaf062005-06-20 10:54:21 -05001667 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001668 local_irq_save(flags);
1669 lock_tx_qs(priv);
1670 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001671
Kumar Gala0bbaf062005-06-20 10:54:21 -05001672 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001674 unlock_rx_qs(priv);
1675 unlock_tx_qs(priv);
1676 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
1678 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001679 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001680 for (i = 0; i < priv->num_grps; i++)
1681 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001683 for (i = 0; i < priv->num_grps; i++)
1684 free_irq(priv->gfargrp[i].interruptTransmit,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001685 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 }
1687
1688 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689}
1690
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001691static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001694 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001695 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001697 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001699 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1700 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001701 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
Kumar Gala48268572009-03-18 23:28:22 -07001703 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001704 txbdp->length, DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001705 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001706 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001707 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001708 txbdp++;
Kumar Gala48268572009-03-18 23:28:22 -07001709 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001710 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001712 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001713 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1714 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001716 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001717 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001718}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001720static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1721{
1722 struct rxbd8 *rxbdp;
1723 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1724 int i;
1725
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001726 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001728 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1729 if (rx_queue->rx_skbuff[i]) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001730 dma_unmap_single(&priv->ofdev->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001731 rxbdp->bufPtr, priv->rx_buffer_size,
1732 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001733 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1734 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001736 rxbdp->lstatus = 0;
1737 rxbdp->bufPtr = 0;
1738 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001740 kfree(rx_queue->rx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001741 rx_queue->rx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001742}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001743
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001744/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00001745 * Then free tx_skbuff and rx_skbuff
1746 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001747static void free_skb_resources(struct gfar_private *priv)
1748{
1749 struct gfar_priv_tx_q *tx_queue = NULL;
1750 struct gfar_priv_rx_q *rx_queue = NULL;
1751 int i;
1752
1753 /* Go through all the buffer descriptors and free their data buffers */
1754 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001755 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001756
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001757 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001758 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001759 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001760 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001761 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001762 }
1763
1764 for (i = 0; i < priv->num_rx_queues; i++) {
1765 rx_queue = priv->rx_queue[i];
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001766 if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001767 free_skb_rx_queue(rx_queue);
1768 }
1769
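	/* The Tx and Rx BD rings live in a single coherent allocation,
	 * so one free starting at the first Tx ring releases them all.
	 */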
1770 dma_free_coherent(&priv->ofdev->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001771 sizeof(struct txbd8) * priv->total_tx_ring_size +
1772 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1773 priv->tx_queue[0]->tx_bd_base,
1774 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775}
1776
Kumar Gala0bbaf062005-06-20 10:54:21 -05001777void gfar_start(struct net_device *dev)
1778{
1779 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001780 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001781 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001782 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001783
1784 /* Enable Rx and Tx in MACCFG1 */
1785 tempval = gfar_read(&regs->maccfg1);
1786 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1787 gfar_write(&regs->maccfg1, tempval);
1788
1789 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001790 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001791 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001792 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001793
Kumar Gala0bbaf062005-06-20 10:54:21 -05001794 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001795 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001796 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001797 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001798
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001799 for (i = 0; i < priv->num_grps; i++) {
1800 regs = priv->gfargrp[i].regs;
1801 /* Clear THLT/RHLT, so that the DMA starts polling now */
1802 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1803 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1804 /* Unmask the interrupts we look for */
1805 gfar_write(&regs->imask, IMASK_DEFAULT);
1806 }
Dai Haruki12dea572008-12-16 15:30:20 -08001807
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001808 dev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001809}
1810
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001811void gfar_configure_coalescing(struct gfar_private *priv,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001812 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001814 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001815 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001816 int i = 0;
1817
1818 /* Backward compatible case ---- even if we enable
1819 * multiple queues, there's only single reg to program
1820 */
1821 gfar_write(&regs->txic, 0);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001822 if (likely(priv->tx_queue[0]->txcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001823 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1824
1825 gfar_write(&regs->rxic, 0);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001826 if (unlikely(priv->rx_queue[0]->rxcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001827 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1828
1829 if (priv->mode == MQ_MG_MODE) {
1830 baddr = &regs->txic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001831 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
Claudiu Manoil9740e002012-06-28 04:40:53 +00001832 gfar_write(baddr + i, 0);
1833 if (likely(priv->tx_queue[i]->txcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001834 gfar_write(baddr + i, priv->tx_queue[i]->txic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001835 }
1836
1837 baddr = &regs->rxic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001838 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
Claudiu Manoil9740e002012-06-28 04:40:53 +00001839 gfar_write(baddr + i, 0);
1840 if (likely(priv->rx_queue[i]->rxcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001841 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001842 }
1843 }
1844}
1845
1846static int register_grp_irqs(struct gfar_priv_grp *grp)
1847{
1848 struct gfar_private *priv = grp->priv;
1849 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001850 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00001853 * them. Otherwise, only register for the one
1854 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001855 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001856 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00001857 * Transmit, and Receive
1858 */
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001859 if ((err = request_irq(grp->interruptError, gfar_error,
1860 0, grp->int_name_er, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001861 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1862 grp->interruptError);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001863
Julia Lawall2145f1a2010-08-05 10:26:20 +00001864 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 }
1866
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001867 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001868 0, grp->int_name_tx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001869 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1870 grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 goto tx_irq_fail;
1872 }
1873
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001874 if ((err = request_irq(grp->interruptReceive, gfar_receive,
1875 0, grp->int_name_rx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001876 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1877 grp->interruptReceive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 goto rx_irq_fail;
1879 }
1880 } else {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001881 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
1882 0, grp->int_name_tx, grp)) < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001883 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1884 grp->interruptTransmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 goto err_irq_fail;
1886 }
1887 }
1888
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001889 return 0;
1890
1891rx_irq_fail:
1892 free_irq(grp->interruptTransmit, grp);
1893tx_irq_fail:
1894 free_irq(grp->interruptError, grp);
1895err_irq_fail:
1896 return err;
1897
1898}
1899
1900/* Bring the controller up and running */
1901int startup_gfar(struct net_device *ndev)
1902{
1903 struct gfar_private *priv = netdev_priv(ndev);
1904 struct gfar __iomem *regs = NULL;
1905 int err, i, j;
1906
1907 for (i = 0; i < priv->num_grps; i++) {
1908		regs = priv->gfargrp[i].regs;
1909 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1910 }
1911
1912	regs = priv->gfargrp[0].regs;
1913 err = gfar_alloc_skb_resources(ndev);
1914 if (err)
1915 return err;
1916
1917 gfar_init_mac(ndev);
1918
1919 for (i = 0; i < priv->num_grps; i++) {
1920 err = register_grp_irqs(&priv->gfargrp[i]);
1921 if (err) {
1922 for (j = 0; j < i; j++)
1923 free_grp_irqs(&priv->gfargrp[j]);
Anton Vorontsovff760152011-01-18 02:36:02 +00001924 goto irq_fail;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001925 }
1926 }
1927
Andy Fleming7f7f5312005-11-11 12:38:59 -06001928 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001929 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001931 phy_start(priv->phydev);
1932
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001933 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 return 0;
1936
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001937irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001938 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 return err;
1940}
1941
Jan Ceuleers0977f812012-06-05 03:42:12 +00001942/* Called when something needs to use the ethernet device
1943 * Returns 0 for success.
1944 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945static int gfar_enet_open(struct net_device *dev)
1946{
Li Yang94e8cc32007-10-12 21:53:51 +08001947 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 int err;
1949
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001950 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 /* Initialize a bunch of registers */
1953 init_registers(dev);
1954
1955 gfar_set_mac_address(dev);
1956
1957 err = init_phy(dev);
1958
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001959 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001960 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001965 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001966 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001967 return err;
1968 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001970 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001972 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 return err;
1975}
1976
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001977static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001978{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07001979 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07001980
1981 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001982
Kumar Gala0bbaf062005-06-20 10:54:21 -05001983 return fcb;
1984}
1985
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00001986static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001987 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001988{
Kumar Gala0bbaf062005-06-20 10:54:21 -05001989	/* If we're here, it's an IP packet with a TCP or UDP
1990 * payload. We set it to checksum, using a pseudo-header
1991 * we provide
1992 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00001993 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001994
Jan Ceuleers0977f812012-06-05 03:42:12 +00001995 /* Tell the controller what the protocol is
1996 * And provide the already calculated phcs
1997 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001998 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001999 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03002000 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002001 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05002002 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002003
2004 /* l3os is the distance between the start of the
2005 * frame (skb->data) and the start of the IP hdr.
2006 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002007 * l3 hdr and the l4 hdr
2008 */
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002009 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002010 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002011
Andy Fleming7f7f5312005-11-11 12:38:59 -06002012 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002013}
2014
Andy Fleming7f7f5312005-11-11 12:38:59 -06002015inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002016{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002017 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002018 fcb->vlctl = vlan_tx_tag_get(skb);
2019}
2020
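/* Advance a BD pointer by 'stride' entries, wrapping from the end of
 * the ring back to its base when necessary.
 */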
Dai Haruki4669bc92008-12-17 16:51:04 -08002021static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002022 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002023{
2024 struct txbd8 *new_bd = bdp + stride;
2025
2026 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2027}
2028
2029static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002030 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002031{
2032 return skip_txbd(bdp, 1, base, ring_size);
2033}
2034
Jan Ceuleers0977f812012-06-05 03:42:12 +00002035/* This is called by the kernel when a frame is ready for transmission.
2036 * It is pointed to by the dev->hard_start_xmit function pointer
2037 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2039{
2040 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002041 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002042 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002043 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002044 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002045 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002046 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002047 int i, rq = 0, do_tstamp = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002048 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05002049 unsigned long flags;
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002050 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002051
Jan Ceuleers0977f812012-06-05 03:42:12 +00002052 /* TOE=1 frames larger than 2500 bytes may see excess delays
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00002053 * before start of transmission.
2054 */
2055 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002056 skb->ip_summed == CHECKSUM_PARTIAL &&
2057 skb->len > 2500)) {
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00002058 int ret;
2059
2060 ret = skb_checksum_help(skb);
2061 if (ret)
2062 return ret;
2063 }
2064
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002065 rq = skb->queue_mapping;
2066 tx_queue = priv->tx_queue[rq];
2067 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002068 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002069 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002070
2071 /* check if time stamp should be generated */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002072 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002073 priv->hwts_tx_en)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002074 do_tstamp = 1;
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002075 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2076 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002077
Li Yang5b28bea2009-03-27 15:54:30 -07002078 /* make space for additional header when fcb is needed */
2079 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002080 vlan_tx_tag_present(skb) ||
2081 unlikely(do_tstamp)) &&
2082 (skb_headroom(skb) < fcb_length)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002083 struct sk_buff *skb_new;
2084
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002085 skb_new = skb_realloc_headroom(skb, fcb_length);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002086 if (!skb_new) {
2087 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07002088 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002089 return NETDEV_TX_OK;
2090 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002091
Eric Dumazet313b0372012-07-05 11:45:13 +00002092 if (skb->sk)
2093 skb_set_owner_w(skb_new, skb->sk);
2094 consume_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002095 skb = skb_new;
2096 }
2097
Dai Haruki4669bc92008-12-17 16:51:04 -08002098 /* total number of fragments in the SKB */
2099 nr_frags = skb_shinfo(skb)->nr_frags;
2100
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002101 /* calculate the required number of TxBDs for this skb */
2102 if (unlikely(do_tstamp))
2103 nr_txbds = nr_frags + 2;
2104 else
2105 nr_txbds = nr_frags + 1;
2106
Dai Haruki4669bc92008-12-17 16:51:04 -08002107 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002108 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002109 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002110 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002111 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002112 return NETDEV_TX_BUSY;
2113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
2115 /* Update transmit stats */
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002116 tx_queue->stats.tx_bytes += skb->len;
2117 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002119 txbdp = txbdp_start = tx_queue->cur_tx;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002120 lstatus = txbdp->lstatus;
2121
2122 /* Time stamp insertion requires one additional TxBD */
2123 if (unlikely(do_tstamp))
2124 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002125 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
Dai Haruki4669bc92008-12-17 16:51:04 -08002127 if (nr_frags == 0) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002128 if (unlikely(do_tstamp))
2129 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002130 TXBD_INTERRUPT);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002131 else
2132 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Dai Haruki4669bc92008-12-17 16:51:04 -08002133 } else {
2134 /* Place the fragment addresses and lengths into the TxBDs */
2135 for (i = 0; i < nr_frags; i++) {
2136 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002137 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
Dai Haruki4669bc92008-12-17 16:51:04 -08002139 length = skb_shinfo(skb)->frags[i].size;
2140
2141 lstatus = txbdp->lstatus | length |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002142 BD_LFLAG(TXBD_READY);
Dai Haruki4669bc92008-12-17 16:51:04 -08002143
2144 /* Handle the last BD specially */
2145 if (i == nr_frags - 1)
2146 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2147
Ian Campbell2234a722011-08-29 23:18:29 +00002148 bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2149 &skb_shinfo(skb)->frags[i],
2150 0,
2151 length,
2152 DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08002153
2154 /* set the TxBD length and buffer pointer */
2155 txbdp->bufPtr = bufaddr;
2156 txbdp->lstatus = lstatus;
2157 }
2158
2159 lstatus = txbdp_start->lstatus;
2160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002162 /* Add TxPAL between FCB and frame if required */
2163 if (unlikely(do_tstamp)) {
2164 skb_push(skb, GMAC_TXPAL_LEN);
2165 memset(skb->data, 0, GMAC_TXPAL_LEN);
2166 }
2167
Kumar Gala0bbaf062005-06-20 10:54:21 -05002168 /* Set up checksumming */
Dai Haruki12dea572008-12-16 15:30:20 -08002169 if (CHECKSUM_PARTIAL == skb->ip_summed) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002170 fcb = gfar_add_fcb(skb);
Alex Dubov4363c2f2011-03-16 17:57:13 +00002171 /* as specified by errata */
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002172 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2173 ((unsigned long)fcb % 0x20) > 0x18)) {
Alex Dubov4363c2f2011-03-16 17:57:13 +00002174 __skb_pull(skb, GMAC_FCB_LEN);
2175 skb_checksum_help(skb);
2176 } else {
2177 lstatus |= BD_LFLAG(TXBD_TOE);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002178 gfar_tx_checksum(skb, fcb, fcb_length);
Alex Dubov4363c2f2011-03-16 17:57:13 +00002179 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002180 }
2181
Jesse Grosseab6d182010-10-20 13:56:03 +00002182 if (vlan_tx_tag_present(skb)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002183 if (unlikely(NULL == fcb)) {
2184 fcb = gfar_add_fcb(skb);
Dai Haruki5a5efed2008-12-16 15:34:50 -08002185 lstatus |= BD_LFLAG(TXBD_TOE);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002186 }
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002187
2188 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002189 }
2190
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002191 /* Setup tx hardware time stamping if requested */
2192 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002193 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002194 if (fcb == NULL)
2195 fcb = gfar_add_fcb(skb);
2196 fcb->ptp = 1;
2197 lstatus |= BD_LFLAG(TXBD_TOE);
2198 }
2199
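	/* Map the linear part of the skb for DMA; any fragments were
	 * already mapped and hooked into the following TxBDs above.
	 */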
Kumar Gala48268572009-03-18 23:28:22 -07002200 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002201 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
Jan Ceuleers0977f812012-06-05 03:42:12 +00002203	/* If time stamping is requested, one additional TxBD must be set up. The
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002204 * first TxBD points to the FCB and must have a data length of
2205 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2206 * the full frame length.
2207 */
2208 if (unlikely(do_tstamp)) {
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002209 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002210 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002211 (skb_headlen(skb) - fcb_length);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002212 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2213 } else {
2214 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2215 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002217 netdev_tx_sent_queue(txq, skb->len);
2218
Jan Ceuleers0977f812012-06-05 03:42:12 +00002219 /* We can work in parallel with gfar_clean_tx_ring(), except
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002220 * when modifying num_txbdfree. Note that we didn't grab the lock
2221 * when we were reading the num_txbdfree and checking for available
2222 * space, that's because outside of this function it can only grow,
2223 * and once we've got needed space, it cannot suddenly disappear.
2224 *
2225 * The lock also protects us from gfar_error(), which can modify
2226 * regs->tstat and thus retrigger the transfers, which is why we
2227 * also must grab the lock before setting ready bit for the first
2228 * to be transmitted BD.
2229 */
2230 spin_lock_irqsave(&tx_queue->txlock, flags);
2231
Jan Ceuleers0977f812012-06-05 03:42:12 +00002232 /* The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002233 * semantics (it requires synchronization between cacheable and
2234 * uncacheable mappings, which eieio doesn't provide and which we
2235 * don't need), thus requiring a more expensive sync instruction. At
2236 * some point, the set of architecture-independent barrier functions
2237 * should be expanded to include weaker barriers.
2238 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002239 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002240
Dai Haruki4669bc92008-12-17 16:51:04 -08002241 txbdp_start->lstatus = lstatus;
2242
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002243 eieio(); /* force lstatus write before tx_skbuff */
2244
2245 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2246
Dai Haruki4669bc92008-12-17 16:51:04 -08002247 /* Update the current skb pointer to the next entry we will use
Jan Ceuleers0977f812012-06-05 03:42:12 +00002248 * (wrapping if necessary)
2249 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002250 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002251 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002252
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002253 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002254
2255 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002256 tx_queue->num_txbdfree -= (nr_txbds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
2258 /* If the next BD still needs to be cleaned up, then the bds
Jan Ceuleers0977f812012-06-05 03:42:12 +00002259 * are full. We need to tell the kernel to stop sending us stuff.
2260 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002261 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002262 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002264 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 }
2266
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002268 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269
2270 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002271 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002273 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274}
2275
2276/* Stops the kernel queue, and halts the controller */
2277static int gfar_close(struct net_device *dev)
2278{
2279 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002280
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002281 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002282
Sebastian Siewiorab939902008-08-19 21:12:45 +02002283 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 stop_gfar(dev);
2285
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002286 /* Disconnect from the PHY */
2287 phy_disconnect(priv->phydev);
2288 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002290 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291
2292 return 0;
2293}
2294
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002296static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002298 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
2300 return 0;
2301}
2302
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002303/* Check if rx parser should be activated */
2304void gfar_check_rx_parser_mode(struct gfar_private *priv)
2305{
2306 struct gfar __iomem *regs;
2307 u32 tempval;
2308
2309 regs = priv->gfargrp[0].regs;
2310
2311 tempval = gfar_read(&regs->rctrl);
2312 /* Enable the parser when some feature requires it; disable it otherwise */
2313 if (tempval & RCTRL_REQ_PARSER)
2314 tempval |= RCTRL_PRSDEP_INIT;
2315 else
2316 tempval &= ~RCTRL_PRSDEP_INIT;
2317 gfar_write(&regs->rctrl, tempval);
2318}
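
/* A side note, going by the register definitions in gianfar.h:
 * RCTRL_REQ_PARSER appears to collect the RCTRL features that cannot
 * work without the parser (VLAN extraction, IP/TCP/UDP checksum
 * offload, the packet filer), so the parser is left on exactly while
 * one of them is in use.
 */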
2319
Kumar Gala0bbaf062005-06-20 10:54:21 -05002320/* Enables and disables VLAN insertion/extraction */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002321void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002322{
2323 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002324 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002325 unsigned long flags;
2326 u32 tempval;
2327
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002328 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002329 local_irq_save(flags);
2330 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002331
Jiri Pirko87c288c2011-07-20 04:54:19 +00002332 if (features & NETIF_F_HW_VLAN_TX) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002333 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002334 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002335 tempval |= TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002336 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002337 } else {
2338 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002339 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002340 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002341 gfar_write(&regs->tctrl, tempval);
Jiri Pirko87c288c2011-07-20 04:54:19 +00002342 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002343
Jiri Pirko87c288c2011-07-20 04:54:19 +00002344 if (features & NETIF_F_HW_VLAN_RX) {
2345 /* Enable VLAN tag extraction */
2346 tempval = gfar_read(&regs->rctrl);
2347 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2348 gfar_write(&regs->rctrl, tempval);
2349 } else {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002350 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002351 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002352 tempval &= ~RCTRL_VLEX;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002353 gfar_write(&regs->rctrl, tempval);
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002354
2355 gfar_check_rx_parser_mode(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002356 }
2357
Dai Haruki77ecaf22008-12-16 15:30:48 -08002358 gfar_change_mtu(dev, dev->mtu);
2359
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002360 unlock_rx_qs(priv);
2361 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002362}
2363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2365{
2366 int tempsize, tempval;
2367 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002368 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002370 int frame_size = new_mtu + ETH_HLEN;
2371
Jiri Pirko87c288c2011-07-20 04:54:19 +00002372 if (gfar_is_vlan_on(priv))
Dai Harukifaa89572008-03-24 10:53:26 -05002373 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002374
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Joe Perches59deab22011-06-14 08:57:47 +00002376 netif_err(priv, drv, dev, "Invalid MTU setting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 return -EINVAL;
2378 }
2379
Dai Haruki77ecaf22008-12-16 15:30:48 -08002380 if (gfar_uses_fcb(priv))
2381 frame_size += GMAC_FCB_LEN;
2382
2383 frame_size += priv->padding;
2384
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002385 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2386 INCREMENTAL_BUFFER_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
2388 /* Only stop and start the controller if it isn't already
Jan Ceuleers0977f812012-06-05 03:42:12 +00002389 * stopped, and we changed something
2390 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2392 stop_gfar(dev);
2393
2394 priv->rx_buffer_size = tempsize;
2395
2396 dev->mtu = new_mtu;
2397
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002398 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2399 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
2401 /* If the mtu is larger than the max size for standard
2402 * ethernet frames (ie, a jumbo frame), then set maccfg2
Jan Ceuleers0977f812012-06-05 03:42:12 +00002403 * to allow huge frames, and to check the length
2404 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002405 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
Anton Vorontsov7d350972010-06-30 06:39:12 +00002407 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002408 gfar_has_errata(priv, GFAR_ERRATA_74))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2410 else
2411 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2412
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002413 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
2415 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2416 startup_gfar(dev);
2417
2418 return 0;
2419}
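
/* A worked example of the sizing above, assuming ETH_HLEN == 14,
 * VLAN_HLEN == 4, GMAC_FCB_LEN == 8, INCREMENTAL_BUFFER_SIZE == 512
 * and zero padding: new_mtu = 1500 gives frame_size = 1514, 1518 with
 * VLAN on, 1526 once the FCB is counted; rounding up to the next
 * 512-byte step gives tempsize = (1526 & ~511) + 512 = 1536, so if
 * rx_buffer_size was still the 1536 default, no stop/start cycle
 * happens at all.
 */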
2420
Sebastian Siewiorab939902008-08-19 21:12:45 +02002421/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 * transmitted after a set amount of time.
2423 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002424 * starting over will fix the problem.
2425 */
2426static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002428 struct gfar_private *priv = container_of(work, struct gfar_private,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002429 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002430 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431
2432 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002433 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 stop_gfar(dev);
2435 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002436 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 }
2438
David S. Miller263ba322008-07-15 03:47:41 -07002439 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440}
2441
Sebastian Siewiorab939902008-08-19 21:12:45 +02002442static void gfar_timeout(struct net_device *dev)
2443{
2444 struct gfar_private *priv = netdev_priv(dev);
2445
2446 dev->stats.tx_errors++;
2447 schedule_work(&priv->reset_task);
2448}
2449
Eran Libertyacbc0f02010-07-07 15:54:54 -07002450static void gfar_align_skb(struct sk_buff *skb)
2451{
2452 /* The data buffer must be properly aligned, so reserve however
2453 * many bytes it takes to put skb->data on an RXBUF_ALIGNMENT boundary
2454 */
2455 skb_reserve(skb, RXBUF_ALIGNMENT -
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002456 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
Eran Libertyacbc0f02010-07-07 15:54:54 -07002457}
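
/* A worked example, assuming RXBUF_ALIGNMENT is 64: for skb->data at
 * an address with (addr & 63) == 36 the call reserves 64 - 36 = 28
 * bytes, landing the data pointer on a 64-byte boundary; an already
 * aligned pointer gets the full 64 bytes reserved, which is why the
 * allocation side over-allocates by RXBUF_ALIGNMENT.
 */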
2458
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459/* Interrupt Handler for Transmit complete */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002460static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002462 struct net_device *dev = tx_queue->dev;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002463 struct netdev_queue *txq;
Dai Harukid080cd62008-04-09 19:37:51 -05002464 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002465 struct gfar_priv_rx_q *rx_queue = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002466 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002467 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002468 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002469 struct sk_buff *skb;
2470 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002471 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002472 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002473 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002474 int howmany = 0;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002475 int tqi = tx_queue->qindex;
2476 unsigned int bytes_sent = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002477 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002478 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002480 rx_queue = priv->rx_queue[tqi];
2481 txq = netdev_get_tx_queue(dev, tqi);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002482 bdp = tx_queue->dirty_tx;
2483 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002484
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002485 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002486 unsigned long flags;
2487
Dai Haruki4669bc92008-12-17 16:51:04 -08002488 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002489
Jan Ceuleers0977f812012-06-05 03:42:12 +00002490 /* When time stamping, one additional TxBD must be freed.
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002491 * Also, we need to dma_unmap_single() the TxPAL.
2492 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002493 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002494 nr_txbds = frags + 2;
2495 else
2496 nr_txbds = frags + 1;
2497
2498 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002499
2500 lstatus = lbdp->lstatus;
2501
2502 /* Only clean completed frames */
2503 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002504 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 break;
2506
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002507 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002508 next = next_txbd(bdp, base, tx_ring_size);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002509 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002510 } else
2511 buflen = bdp->length;
2512
2513 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002514 buflen, DMA_TO_DEVICE);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002515
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002516 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002517 struct skb_shared_hwtstamps shhwtstamps;
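			/* The controller inserts the 64-bit Tx timestamp
			 * near the head of the buffer; the arithmetic
			 * below presumably skips the 16-byte TxPAL area
			 * and rounds down to the 8-byte alignment the
			 * hardware writes at.
			 */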
2518 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002519
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002520 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2521 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002522 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002523 skb_tstamp_tx(skb, &shhwtstamps);
2524 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2525 bdp = next;
2526 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002527
2528 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2529 bdp = next_txbd(bdp, base, tx_ring_size);
2530
2531 for (i = 0; i < frags; i++) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002532 dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
2533 bdp->length, DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08002534 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2535 bdp = next_txbd(bdp, base, tx_ring_size);
2536 }
2537
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002538 bytes_sent += skb->len;
2539
Eric Dumazetacb600d2012-10-05 06:23:55 +00002540 dev_kfree_skb_any(skb);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002541
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002542 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002543
2544 skb_dirtytx = (skb_dirtytx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002545 TX_RING_MOD_MASK(tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002546
Dai Harukid080cd62008-04-09 19:37:51 -05002547 howmany++;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002548 spin_lock_irqsave(&tx_queue->txlock, flags);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002549 tx_queue->num_txbdfree += nr_txbds;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002550 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Dai Haruki4669bc92008-12-17 16:51:04 -08002551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552
Dai Haruki4669bc92008-12-17 16:51:04 -08002553 /* If we freed a buffer, we can restart transmission, if necessary */
Paul Gortmaker5407b14c2012-03-18 17:11:22 -04002554 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002555 netif_wake_subqueue(dev, tqi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
Dai Haruki4669bc92008-12-17 16:51:04 -08002557 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002558 tx_queue->skb_dirtytx = skb_dirtytx;
2559 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002561 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2562
Dai Harukid080cd62008-04-09 19:37:51 -05002563 return howmany;
2564}
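
/* A note on locking above: once the controller clears TXBD_READY the
 * descriptor belongs to software again, so the ring walk itself runs
 * unlocked in NAPI context; txlock is only taken around num_txbdfree,
 * the one counter that gfar_start_xmit() updates concurrently.
 */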
2565
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002566static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
Dai Haruki8c7396a2008-12-17 16:52:00 -08002567{
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002568 unsigned long flags;
2569
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002570 spin_lock_irqsave(&gfargrp->grplock, flags);
2571 if (napi_schedule_prep(&gfargrp->napi)) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002572 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002573 __napi_schedule(&gfargrp->napi);
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002574 } else {
Jan Ceuleers0977f812012-06-05 03:42:12 +00002575 /* Clear IEVENT, so interrupts aren't called again
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002576 * because of the packets that have already arrived.
2577 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002578 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002579 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002580 spin_unlock_irqrestore(&gfargrp->grplock, flags);
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002581
Dai Haruki8c7396a2008-12-17 16:52:00 -08002582}
2583
Dai Harukid080cd62008-04-09 19:37:51 -05002584/* Interrupt Handler for Transmit complete */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002585static irqreturn_t gfar_transmit(int irq, void *grp_id)
Dai Harukid080cd62008-04-09 19:37:51 -05002586{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002587 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 return IRQ_HANDLED;
2589}
2590
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002591static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002592 struct sk_buff *skb)
Andy Fleming815b97c2008-04-22 17:18:29 -05002593{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002594 struct net_device *dev = rx_queue->dev;
Andy Fleming815b97c2008-04-22 17:18:29 -05002595 struct gfar_private *priv = netdev_priv(dev);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002596 dma_addr_t buf;
Andy Fleming815b97c2008-04-22 17:18:29 -05002597
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002598 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2599 priv->rx_buffer_size, DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002600 gfar_init_rxbdp(rx_queue, bdp, buf);
Andy Fleming815b97c2008-04-22 17:18:29 -05002601}
2602
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00002603static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002604{
2605 struct gfar_private *priv = netdev_priv(dev);
Eric Dumazetacb600d2012-10-05 06:23:55 +00002606 struct sk_buff *skb;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002607
2608 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2609 if (!skb)
2610 return NULL;
2611
2612 gfar_align_skb(skb);
2613
2614 return skb;
2615}
Andy Fleming815b97c2008-04-22 17:18:29 -05002616
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00002617struct sk_buff *gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618{
Eric Dumazetacb600d2012-10-05 06:23:55 +00002619 return gfar_alloc_skb(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620}
2621
Li Yang298e1a92007-10-16 14:18:13 +08002622static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623{
Li Yang298e1a92007-10-16 14:18:13 +08002624 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002625 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 struct gfar_extra_stats *estats = &priv->extra_stats;
2627
Jan Ceuleers0977f812012-06-05 03:42:12 +00002628 /* If the packet was truncated, none of the other errors matter */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 if (status & RXBD_TRUNCATED) {
2630 stats->rx_length_errors++;
2631
2632 estats->rx_trunc++;
2633
2634 return;
2635 }
2636 /* Count the errors, if there were any */
2637 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2638 stats->rx_length_errors++;
2639
2640 if (status & RXBD_LARGE)
2641 estats->rx_large++;
2642 else
2643 estats->rx_short++;
2644 }
2645 if (status & RXBD_NONOCTET) {
2646 stats->rx_frame_errors++;
2647 estats->rx_nonoctet++;
2648 }
2649 if (status & RXBD_CRCERR) {
2650 estats->rx_crcerr++;
2651 stats->rx_crc_errors++;
2652 }
2653 if (status & RXBD_OVERRUN) {
2654 estats->rx_overrun++;
2655 stats->rx_crc_errors++;
2656 }
2657}
2658
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002659irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002661 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 return IRQ_HANDLED;
2663}
2664
Kumar Gala0bbaf062005-06-20 10:54:21 -05002665static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2666{
2667 /* If valid headers were found, and valid sums
2668 * were verified, then we tell the kernel that no
Jan Ceuleers0977f812012-06-05 03:42:12 +00002669 * checksumming is necessary. Otherwise, the stack is left to verify it (CHECKSUM_NONE).
2670 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002671 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05002672 skb->ip_summed = CHECKSUM_UNNECESSARY;
2673 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002674 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002675}
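
/* On the comparison in gfar_rx_checksum(): RXFCB_CSUM_MASK is taken
 * to cover both the "checked" flags (RXFCB_CIP, RXFCB_CTU) and their
 * error counterparts, so only frames whose IP and TCP/UDP checksums
 * were both verified and found good skip software checksumming.
 */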
2676
2677
Jan Ceuleers0977f812012-06-05 03:42:12 +00002678/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002680 int amount_pull, struct napi_struct *napi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681{
2682 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002683 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002685 gro_result_t ret;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002686
Dai Haruki2c2db482008-12-16 15:31:15 -08002687 /* the FCB, if present, sits at the beginning of the buffer */
2688 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
Jan Ceuleers0977f812012-06-05 03:42:12 +00002690 /* Remove the FCB from the skb
2691 * Remove the padded bytes, if there are any
2692 */
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002693 if (amount_pull) {
2694 skb_record_rx_queue(skb, fcb->rq);
Dai Haruki2c2db482008-12-16 15:31:15 -08002695 skb_pull(skb, amount_pull);
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002696 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002697
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002698 /* Get receive timestamp from the skb */
2699 if (priv->hwts_rx_en) {
2700 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2701 u64 *ns = (u64 *) skb->data;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002702
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002703 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2704 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2705 }
2706
2707 if (priv->padding)
2708 skb_pull(skb, priv->padding);
2709
Michał Mirosław8b3afe92011-04-15 04:50:50 +00002710 if (dev->features & NETIF_F_RXCSUM)
Dai Haruki2c2db482008-12-16 15:31:15 -08002711 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002712
Dai Haruki2c2db482008-12-16 15:31:15 -08002713 /* Tell the skb what kind of packet this is */
2714 skb->protocol = eth_type_trans(skb, dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002715
Jan Ceuleers0977f812012-06-05 03:42:12 +00002716 /* NETIF_F_HW_VLAN_RX must be checked here: even when VLAN RX
David S. Miller823dcd22011-08-20 10:39:12 -07002717 * acceleration is disabled, some chips set RXFCB_VLN
 2718 * pseudo-randomly.
2719 */
2720 if (dev->features & NETIF_F_HW_VLAN_RX &&
2721 fcb->flags & RXFCB_VLN)
Jiri Pirko87c288c2011-07-20 04:54:19 +00002722 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2723
Dai Haruki2c2db482008-12-16 15:31:15 -08002724 /* Send the packet up the stack */
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002725 ret = napi_gro_receive(napi, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002727 if (GRO_DROP == ret)
Dai Haruki2c2db482008-12-16 15:31:15 -08002728 priv->extra_stats.kernel_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729
2730 return 0;
2731}
2732
2733/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00002734 * until the budget/quota has been reached. Returns the number
2735 * of frames handled
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002737int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002739 struct net_device *dev = rx_queue->dev;
Andy Fleming31de1982008-12-16 15:33:40 -08002740 struct rxbd8 *bdp, *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 struct sk_buff *skb;
Dai Haruki2c2db482008-12-16 15:31:15 -08002742 int pkt_len;
2743 int amount_pull;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 int howmany = 0;
2745 struct gfar_private *priv = netdev_priv(dev);
2746
2747 /* Get the first full descriptor */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002748 bdp = rx_queue->cur_rx;
2749 base = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002751 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
Dai Haruki2c2db482008-12-16 15:31:15 -08002752
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002754 struct sk_buff *newskb;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002755
Scott Wood3b6330c2007-05-16 15:06:59 -05002756 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05002757
2758 /* Add another skb for the future */
2759 newskb = gfar_new_skb(dev);
2760
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002761 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762
Kumar Gala48268572009-03-18 23:28:22 -07002763 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002764 priv->rx_buffer_size, DMA_FROM_DEVICE);
Andy Fleming81183052008-11-12 10:07:11 -06002765
Anton Vorontsov63b88b92010-06-11 10:51:03 +00002766 if (unlikely(!(bdp->status & RXBD_ERR) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002767 bdp->length > priv->rx_buffer_size))
Anton Vorontsov63b88b92010-06-11 10:51:03 +00002768 bdp->status = RXBD_LARGE;
2769
Andy Fleming815b97c2008-04-22 17:18:29 -05002770 /* We drop the frame if we failed to allocate a new buffer */
2771 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002772 bdp->status & RXBD_ERR)) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002773 count_errors(bdp->status, dev);
2774
2775 if (unlikely(!newskb))
2776 newskb = skb;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002777 else if (skb)
Eric Dumazetacb600d2012-10-05 06:23:55 +00002778 dev_kfree_skb(skb);
Andy Fleming815b97c2008-04-22 17:18:29 -05002779 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 /* Increment the number of packets */
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002781 rx_queue->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 howmany++;
2783
Dai Haruki2c2db482008-12-16 15:31:15 -08002784 if (likely(skb)) {
2785 pkt_len = bdp->length - ETH_FCS_LEN;
2786 /* Remove the FCS from the packet length */
2787 skb_put(skb, pkt_len);
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002788 rx_queue->stats.rx_bytes += pkt_len;
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002789 skb_record_rx_queue(skb, rx_queue->qindex);
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002790 gfar_process_frame(dev, skb, amount_pull,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002791 &rx_queue->grp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
Dai Haruki2c2db482008-12-16 15:31:15 -08002793 } else {
Joe Perches59deab22011-06-14 08:57:47 +00002794 netif_warn(priv, rx_err, dev, "Missing skb!\n");
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002795 rx_queue->stats.rx_dropped++;
Dai Haruki2c2db482008-12-16 15:31:15 -08002796 priv->extra_stats.rx_skbmissing++;
2797 }
2798
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 }
2800
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002801 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
Andy Fleming815b97c2008-04-22 17:18:29 -05002803 /* Setup the new bdp */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002804 gfar_new_rxbdp(rx_queue, bdp, newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805
2806 /* Update to the next pointer */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002807 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808
2809 /* update to point at the next skb */
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002810 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2811 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 }
2813
2814 /* Update the current rxbd pointer to be the next one */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002815 rx_queue->cur_rx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 return howmany;
2818}
2819
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002820static int gfar_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821{
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002822 struct gfar_priv_grp *gfargrp =
2823 container_of(napi, struct gfar_priv_grp, napi);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002824 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002825 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002826 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002827 struct gfar_priv_rx_q *rx_queue = NULL;
2828 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00002829 int tx_cleaned = 0, i, left_over_budget = budget;
2830 unsigned long serviced_queues = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002831 int num_queues = 0;
Dai Harukid080cd62008-04-09 19:37:51 -05002832
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002833 num_queues = gfargrp->num_rx_queues;
2834 budget_per_queue = budget/num_queues;
2835
Dai Haruki8c7396a2008-12-17 16:52:00 -08002836 /* Clear IEVENT, so interrupts aren't called again
Jan Ceuleers0977f812012-06-05 03:42:12 +00002837 * because of the packets that have already arrived
2838 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002839 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002840
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002841 while (num_queues && left_over_budget) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002842 budget_per_queue = left_over_budget/num_queues;
2843 left_over_budget = 0;
2844
Akinobu Mita984b3f52010-03-05 13:41:37 -08002845 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002846 if (test_bit(i, &serviced_queues))
2847 continue;
2848 rx_queue = priv->rx_queue[i];
2849 tx_queue = priv->tx_queue[rx_queue->qindex];
2850
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002851 tx_cleaned += gfar_clean_tx_ring(tx_queue);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002852 rx_cleaned_per_queue =
2853 gfar_clean_rx_ring(rx_queue, budget_per_queue);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002854 rx_cleaned += rx_cleaned_per_queue;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002855 if (rx_cleaned_per_queue < budget_per_queue) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002856 left_over_budget = left_over_budget +
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002857 (budget_per_queue -
2858 rx_cleaned_per_queue);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002859 set_bit(i, &serviced_queues);
2860 num_queues--;
2861 }
2862 }
Dai Harukid080cd62008-04-09 19:37:51 -05002863 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864
Andy Fleming42199882008-12-17 16:52:30 -08002865 if (tx_cleaned)
2866 return budget;
2867
2868 if (rx_cleaned < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002869 napi_complete(napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870
2871 /* Clear the halt bit in RSTAT */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002872 gfar_write(&regs->rstat, gfargrp->rstat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002874 gfar_write(&regs->imask, IMASK_DEFAULT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875
Jan Ceuleers0977f812012-06-05 03:42:12 +00002876 /* If we are coalescing interrupts, update the timer;
 2877 * otherwise, clear it
2878 */
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002879 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2880 gfargrp->tx_bit_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 }
2882
Andy Fleming42199882008-12-17 16:52:30 -08002883 return rx_cleaned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884}
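
/* A worked pass through the budget loop above: with budget = 64 and
 * two RX queues, each starts with budget_per_queue = 32; if queue 0
 * only has 20 frames pending, its spare 12 land in left_over_budget,
 * queue 0 is marked in serviced_queues, and the outer while loop
 * offers the remainder to the still-busy queue on the next round.
 */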
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002886#ifdef CONFIG_NET_POLL_CONTROLLER
Jan Ceuleers0977f812012-06-05 03:42:12 +00002887/* Polling 'interrupt' - used by things like netconsole to send skbs
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002888 * without having to re-enable interrupts. It's not called while
2889 * the interrupt routine is executing.
2890 */
2891static void gfar_netpoll(struct net_device *dev)
2892{
2893 struct gfar_private *priv = netdev_priv(dev);
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002894 int i;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002895
2896 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002897 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002898 for (i = 0; i < priv->num_grps; i++) {
2899 disable_irq(priv->gfargrp[i].interruptTransmit);
2900 disable_irq(priv->gfargrp[i].interruptReceive);
2901 disable_irq(priv->gfargrp[i].interruptError);
2902 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002903 &priv->gfargrp[i]);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002904 enable_irq(priv->gfargrp[i].interruptError);
2905 enable_irq(priv->gfargrp[i].interruptReceive);
2906 enable_irq(priv->gfargrp[i].interruptTransmit);
2907 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002908 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002909 for (i = 0; i < priv->num_grps; i++) {
2910 disable_irq(priv->gfargrp[i].interruptTransmit);
2911 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002912 &priv->gfargrp[i]);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002913 enable_irq(priv->gfargrp[i].interruptTransmit);
Anton Vorontsov43de0042009-12-09 02:52:19 -08002914 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002915 }
2916}
2917#endif
2918
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002920static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002922 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923
2924 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002925 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002928 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002929 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930
2931 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002932 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002933 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002935 /* Check for errors */
2936 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002937 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938
2939 return IRQ_HANDLED;
2940}
2941
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942/* Called every time the controller might need to be made
2943 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002944 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 * function converts those variables into the appropriate
2946 * register values, and can bring down the device if needed.
2947 */
2948static void adjust_link(struct net_device *dev)
2949{
2950 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002951 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002952 unsigned long flags;
2953 struct phy_device *phydev = priv->phydev;
2954 int new_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002956 local_irq_save(flags);
2957 lock_tx_qs(priv);
2958
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002959 if (phydev->link) {
2960 u32 tempval = gfar_read(&regs->maccfg2);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002961 u32 ecntrl = gfar_read(&regs->ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002962
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 /* Now we make sure that we can be in full duplex mode.
Jan Ceuleers0977f812012-06-05 03:42:12 +00002964 * If not, we operate in half-duplex mode.
2965 */
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002966 if (phydev->duplex != priv->oldduplex) {
2967 new_state = 1;
2968 if (!(phydev->duplex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 tempval &= ~(MACCFG2_FULL_DUPLEX);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002970 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 tempval |= MACCFG2_FULL_DUPLEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002973 priv->oldduplex = phydev->duplex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 }
2975
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002976 if (phydev->speed != priv->oldspeed) {
2977 new_state = 1;
2978 switch (phydev->speed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 case 1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 tempval =
2981 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
Li Yangf430e492009-01-06 14:08:10 -08002982
2983 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 break;
2985 case 100:
2986 case 10:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 tempval =
2988 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002989
2990 /* Reduced mode distinguishes
Jan Ceuleers0977f812012-06-05 03:42:12 +00002991 * between 10 and 100
2992 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002993 if (phydev->speed == SPEED_100)
2994 ecntrl |= ECNTRL_R100;
2995 else
2996 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 break;
2998 default:
Joe Perches59deab22011-06-14 08:57:47 +00002999 netif_warn(priv, link, dev,
3000 "Ack! Speed (%d) is not 10/100/1000!\n",
3001 phydev->speed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 break;
3003 }
3004
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003005 priv->oldspeed = phydev->speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 }
3007
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003008 gfar_write(&regs->maccfg2, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003009 gfar_write(&regs->ecntrl, ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003010
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 if (!priv->oldlink) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003012 new_state = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 priv->oldlink = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 }
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003015 } else if (priv->oldlink) {
3016 new_state = 1;
3017 priv->oldlink = 0;
3018 priv->oldspeed = 0;
3019 priv->oldduplex = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003022 if (new_state && netif_msg_link(priv))
3023 phy_print_status(phydev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003024 unlock_tx_qs(priv);
3025 local_irq_restore(flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003026}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
3028/* Update the hash table based on the current list of multicast
3029 * addresses we subscribe to. Also, change the promiscuity of
3030 * the device based on the flags (this function is called
Jan Ceuleers0977f812012-06-05 03:42:12 +00003031 * whenever dev->flags is changed)
3032 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033static void gfar_set_multi(struct net_device *dev)
3034{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003035 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003037 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 u32 tempval;
3039
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003040 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 /* Set RCTRL to PROM */
3042 tempval = gfar_read(&regs->rctrl);
3043 tempval |= RCTRL_PROM;
3044 gfar_write(&regs->rctrl, tempval);
3045 } else {
3046 /* Set RCTRL to not PROM */
3047 tempval = gfar_read(&regs->rctrl);
3048 tempval &= ~(RCTRL_PROM);
3049 gfar_write(&regs->rctrl, tempval);
3050 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003051
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003052 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003054 gfar_write(&regs->igaddr0, 0xffffffff);
3055 gfar_write(&regs->igaddr1, 0xffffffff);
3056 gfar_write(&regs->igaddr2, 0xffffffff);
3057 gfar_write(&regs->igaddr3, 0xffffffff);
3058 gfar_write(&regs->igaddr4, 0xffffffff);
3059 gfar_write(&regs->igaddr5, 0xffffffff);
3060 gfar_write(&regs->igaddr6, 0xffffffff);
3061 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062 gfar_write(&regs->gaddr0, 0xffffffff);
3063 gfar_write(&regs->gaddr1, 0xffffffff);
3064 gfar_write(&regs->gaddr2, 0xffffffff);
3065 gfar_write(&regs->gaddr3, 0xffffffff);
3066 gfar_write(&regs->gaddr4, 0xffffffff);
3067 gfar_write(&regs->gaddr5, 0xffffffff);
3068 gfar_write(&regs->gaddr6, 0xffffffff);
3069 gfar_write(&regs->gaddr7, 0xffffffff);
3070 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003071 int em_num;
3072 int idx;
3073
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003075 gfar_write(&regs->igaddr0, 0x0);
3076 gfar_write(&regs->igaddr1, 0x0);
3077 gfar_write(&regs->igaddr2, 0x0);
3078 gfar_write(&regs->igaddr3, 0x0);
3079 gfar_write(&regs->igaddr4, 0x0);
3080 gfar_write(&regs->igaddr5, 0x0);
3081 gfar_write(&regs->igaddr6, 0x0);
3082 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 gfar_write(&regs->gaddr0, 0x0);
3084 gfar_write(&regs->gaddr1, 0x0);
3085 gfar_write(&regs->gaddr2, 0x0);
3086 gfar_write(&regs->gaddr3, 0x0);
3087 gfar_write(&regs->gaddr4, 0x0);
3088 gfar_write(&regs->gaddr5, 0x0);
3089 gfar_write(&regs->gaddr6, 0x0);
3090 gfar_write(&regs->gaddr7, 0x0);
3091
Andy Fleming7f7f5312005-11-11 12:38:59 -06003092 /* If we have extended hash tables, we need to
3093 * clear the exact match registers to prepare for
Jan Ceuleers0977f812012-06-05 03:42:12 +00003094 * setting them
3095 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003096 if (priv->extended_hash) {
3097 em_num = GFAR_EM_NUM + 1;
3098 gfar_clear_exact_match(dev);
3099 idx = 1;
3100 } else {
3101 idx = 0;
3102 em_num = 0;
3103 }
3104
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003105 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106 return;
3107
3108 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003109 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003110 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003111 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003112 idx++;
3113 } else
Jiri Pirko22bedad32010-04-01 21:22:57 +00003114 gfar_set_hash_for_addr(dev, ha->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115 }
3116 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117}
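
/* The group hash above is an approximation: distinct multicast
 * addresses can collide on the same gaddr bit, so the controller may
 * accept frames for groups that were never joined, leaving the
 * network stack to discard them; only the exact-match registers
 * filter precisely.
 */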
3118
Andy Fleming7f7f5312005-11-11 12:38:59 -06003119
3120/* Clears each of the exact match registers to zero, so they
Jan Ceuleers0977f812012-06-05 03:42:12 +00003121 * don't interfere with normal reception
3122 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003123static void gfar_clear_exact_match(struct net_device *dev)
3124{
3125 int idx;
Joe Perches6a3c9102011-11-16 09:38:02 +00003126 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
Andy Fleming7f7f5312005-11-11 12:38:59 -06003127
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003128 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
Joe Perchesb6bc7652010-12-21 02:16:08 -08003129 gfar_set_mac_for_addr(dev, idx, zero_arr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003130}
3131
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132/* Set the appropriate hash bit for the given addr */
3133/* The algorithm works like so:
3134 * 1) Take the Destination Address (ie the multicast address), and
3135 * do a CRC on it (little endian), and reverse the bits of the
3136 * result.
3137 * 2) Use the 8 most significant bits as a hash into a 256-entry
3138 * table. The table is controlled through 8 32-bit registers:
3139 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3140 * entry 255. This means that the 3 most significant bits of the
3141 * hash index select which gaddr register to use, and the 5 other bits
3142 * indicate which bit (assuming an IBM numbering scheme, which
3143 * for PowerPC (tm) is usually the case) in the register holds
Jan Ceuleers0977f812012-06-05 03:42:12 +00003144 * the entry.
3145 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3147{
3148 u32 tempval;
3149 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c9102011-11-16 09:38:02 +00003150 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003151 int width = priv->hash_width;
3152 u8 whichbit = (result >> (32 - width)) & 0x1f;
3153 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 u32 value = (1 << (31-whichbit));
3155
Kumar Gala0bbaf062005-06-20 10:54:21 -05003156 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003158 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159}
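
/* A worked example, assuming hash_width == 8 (the 256-entry table
 * described above): a CRC result of 0xd4000000 has top byte
 * 0xd4 = 0b11010100, so whichreg = 0b110 = 6 and whichbit =
 * 0b10100 = 20, and value = 1 << (31 - 20) sets IBM-numbered bit 20
 * in hash_regs[6].
 */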
3160
Andy Fleming7f7f5312005-11-11 12:38:59 -06003161
3162/* There are multiple MAC Address register pairs on some controllers
3163 * This function sets the numth pair to a given address
3164 */
Joe Perchesb6bc7652010-12-21 02:16:08 -08003165static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3166 const u8 *addr)
Andy Fleming7f7f5312005-11-11 12:38:59 -06003167{
3168 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003169 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003170 int idx;
Joe Perches6a3c9102011-11-16 09:38:02 +00003171 char tmpbuf[ETH_ALEN];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003172 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003173 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003174
3175 macptr += num*2;
3176
Jan Ceuleers0977f812012-06-05 03:42:12 +00003177 /* Now copy it into the mac registers backwards, cuz
3178 * little endian is silly
3179 */
Joe Perches6a3c9102011-11-16 09:38:02 +00003180 for (idx = 0; idx < ETH_ALEN; idx++)
3181 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003182
3183 gfar_write(macptr, *((u32 *) (tmpbuf)));
3184
3185 tempval = *((u32 *) (tmpbuf + 4));
3186
3187 gfar_write(macptr+1, tempval);
3188}
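
/* A worked example: for addr = 00:04:9f:01:02:03 the reversed tmpbuf
 * is {03, 02, 01, 9f, 04, 00}, so on big-endian PowerPC macstnaddr1
 * is written as 0x0302019f and the top half of macstnaddr2 as 0x0400;
 * the low half of the second write comes from past the 6-byte buffer
 * and is presumably ignored by the hardware.
 */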
3189
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003191static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003193 struct gfar_priv_grp *gfargrp = grp_id;
3194 struct gfar __iomem *regs = gfargrp->regs;
 3195 struct gfar_private *priv = gfargrp->priv;
3196 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197
3198 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003199 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200
3201 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003202 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05003203
3204 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003205 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05003206 (events & IEVENT_MAG))
3207 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208
3209 /* Hmm... */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003210 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003211 netdev_dbg(dev,
3212 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
Joe Perches59deab22011-06-14 08:57:47 +00003213 events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214
3215 /* Update the error counters */
3216 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003217 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
3219 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003220 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003222 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223 if (events & IEVENT_XFUN) {
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003224 unsigned long flags;
3225
Joe Perches59deab22011-06-14 08:57:47 +00003226 netif_dbg(priv, tx_err, dev,
3227 "TX FIFO underrun, packet dropped\n");
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003228 dev->stats.tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 priv->extra_stats.tx_underrun++;
3230
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003231 local_irq_save(flags);
3232 lock_tx_qs(priv);
3233
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 /* Reactivate the Tx Queues */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003235 gfar_write(&regs->tstat, gfargrp->tstat);
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003236
3237 unlock_tx_qs(priv);
3238 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 }
Joe Perches59deab22011-06-14 08:57:47 +00003240 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 }
3242 if (events & IEVENT_BSY) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003243 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244 priv->extra_stats.rx_bsy++;
3245
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003246 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
Joe Perches59deab22011-06-14 08:57:47 +00003248 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3249 gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 }
3251 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003252 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 priv->extra_stats.rx_babr++;
3254
Joe Perches59deab22011-06-14 08:57:47 +00003255 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 }
3257 if (events & IEVENT_EBERR) {
3258 priv->extra_stats.eberr++;
Joe Perches59deab22011-06-14 08:57:47 +00003259 netif_dbg(priv, rx_err, dev, "bus error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 }
Joe Perches59deab22011-06-14 08:57:47 +00003261 if (events & IEVENT_RXC)
3262 netif_dbg(priv, rx_status, dev, "control frame\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263
3264 if (events & IEVENT_BABT) {
3265 priv->extra_stats.tx_babt++;
Joe Perches59deab22011-06-14 08:57:47 +00003266 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 }
3268 return IRQ_HANDLED;
3269}
3270
Andy Flemingb31a1d82008-12-16 15:29:15 -08003271static struct of_device_id gfar_match[] =
3272{
3273 {
3274 .type = "network",
3275 .compatible = "gianfar",
3276 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003277 {
3278 .compatible = "fsl,etsec2",
3279 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003280 {},
3281};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003282MODULE_DEVICE_TABLE(of, gfar_match);
Andy Flemingb31a1d82008-12-16 15:29:15 -08003283
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284/* Structure for a device driver */
Grant Likely74888762011-02-22 21:05:51 -07003285static struct platform_driver gfar_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003286 .driver = {
3287 .name = "fsl-gianfar",
3288 .owner = THIS_MODULE,
3289 .pm = GFAR_PM_OPS,
3290 .of_match_table = gfar_match,
3291 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292 .probe = gfar_probe,
3293 .remove = gfar_remove,
3294};
3295
Axel Lindb62f682011-11-27 16:44:17 +00003296module_platform_driver(gfar_driver);