/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

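/* Illustrative sketch (comment only, not driver code) of how a consumer
 * walks a wrap-bit ring like the Rx BD ring described above. The names
 * mirror this driver's structures; the process() step is hypothetical.
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		process(bdp);
 *		bdp = (bdp->status & RXBD_WRAP) ?
 *		      rx_queue->rx_bd_base : bdp + 1;
 *	}
 */
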
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
static int gfar_poll_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

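/* Initialize one Rx buffer descriptor: record the DMA address of its
 * buffer, then hand ownership to the controller by setting RXBD_EMPTY
 * (plus RXBD_WRAP on the ring's last BD). The eieio() barrier orders
 * the bufPtr store before the lstatus store, so the hardware never
 * sees an empty BD with a stale buffer pointer.
 */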
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

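/* (Re)initialize all Tx and Rx BD rings. Tx BDs are zeroed and the last
 * one gets TXBD_WRAP; Rx BDs are rearmed in place when a receive skb
 * already exists (e.g. when restoring state), otherwise a fresh skb is
 * allocated for the slot.
 */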
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;
}

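/* Allocate one coherent DMA region large enough for every Tx ring
 * followed by every Rx ring, carve per-queue base addresses out of it,
 * allocate the per-ring skb pointer arrays, and arm the descriptors.
 */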
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

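/* Tell the controller where each ring lives. The per-queue base
 * registers (tbase0..., rbase0...) sit on eight-byte strides in the
 * register map, hence the += 2 steps over a u32 pointer.
 */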
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

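/* Program MAC-level Rx/Tx control: ring base registers and coalescing,
 * then RCTRL (filer, promisc, Rx checksum, extended hash, padding,
 * timestamping, VLAN extraction) and TCTRL (Tx csum, priority vs.
 * weighted-round-robin queue scheduling). uses_rxfcb tracks whether any
 * enabled Rx offload prepends a frame control block to received frames.
 */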
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM) {
		rctrl |= RCTRL_CHECKSUMMING;
		priv->uses_rxfcb = 1;
	}

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en) {
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
		priv->uses_rxfcb = 1;
	}

	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 1;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);
}

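/* Aggregate the per-queue software counters into the single netdev
 * stats block. The counters are read without locking, so the result is
 * a best-effort snapshot, which is all ndo_get_stats requires.
 */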
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

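/* Helpers that take or release every per-queue lock. Queues are always
 * locked in ascending index order, so two paths grabbing multiple queue
 * locks cannot deadlock on each other's partial holdings.
 */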
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

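/* Parse one interrupt group node from the device tree: map its register
 * block, fetch its IRQ(s) (three per group unless the controller is a
 * single-interrupt FEC), read the Rx/Tx queue bitmaps, and assign the
 * covered queues to this group. eTSEC2 (MQ_MG_MODE) devices have one
 * such child node per group; older parts are treated as a single group.
 */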
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

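/* Probe-time device-tree parsing: allocate the net_device with the
 * requested number of Tx queues, allocate the per-queue structures,
 * walk the group node(s), and pick up model-specific capability flags,
 * stashing parameters, the MAC address and the PHY/TBI handles.
 */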
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

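/* SIOCSHWTSTAMP handler. Tx timestamping is a per-skb flag, but
 * enabling or disabling Rx timestamping changes RCTRL_TS_ENABLE and
 * therefore the received frame layout (the timestamp lands in the
 * padding bytes), so the device is stopped and restarted around it.
 */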
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

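/* Install a small cluster of Rx filer rules for one protocol class,
 * writing entries from MAX_FILER_IDX downward and keeping a software
 * shadow of each entry in ftp_rqfcr/ftp_rqfpr.
 */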
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

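/* Errata detection: identify the SoC from the PVR/SVR registers and set
 * GFAR_ERRATA_* flags that the rest of the driver consults. e300-based
 * (83xx) and e500-based (85xx) parts are keyed on different fields.
 */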
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

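/* One-time MAC hardware bring-up after reset: soft-reset the MAC (the
 * reset bit is not self-clearing), program MACCFG2/ECNTRL, stashing
 * attributes and FIFO thresholds, steer interrupts for multi-group
 * devices, and enable the Rx/Tx queues selected during group parsing.
 */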
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval, attrs;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);

	/* Enable all Rx/Tx queues after MAC reset */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);
}

static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	gfar_hw_init(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for NAPI; we register one NAPI context per group */
	if (priv->mode == SQ_SG_MODE)
		netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
			       GFAR_DEV_WEIGHT);
	else
		for (i = 0; i < priv->num_grps; i++)
			netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
				       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001312
1313 local_irq_save(flags);
1314 lock_tx_qs(priv);
1315 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001316
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001317 gfar_halt_nodisable(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001318
1319 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001320 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001321
1322 tempval &= ~MACCFG1_TX_EN;
1323
1324 if (!magic_packet)
1325 tempval &= ~MACCFG1_RX_EN;
1326
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001327 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001328
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001329 unlock_rx_qs(priv);
1330 unlock_tx_qs(priv);
1331 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001332
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001333 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001334
1335 if (magic_packet) {
1336 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001337 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001338
1339 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001340 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001341 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001342 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001343 } else {
1344 phy_stop(priv->phydev);
1345 }
1346 }
1347
1348 return 0;
1349}
1350
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001351static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001352{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001353 struct gfar_private *priv = dev_get_drvdata(dev);
1354 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001355 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001356 unsigned long flags;
1357 u32 tempval;
1358 int magic_packet = priv->wol_en &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001359 (priv->device_flags &
1360 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001361
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001362 if (!netif_running(ndev)) {
1363 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001364 return 0;
1365 }
1366
1367 if (!magic_packet && priv->phydev)
1368 phy_start(priv->phydev);
1369
1370 /* Disable Magic Packet mode, in case something
1371 * else woke us up.
1372 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001373 local_irq_save(flags);
1374 lock_tx_qs(priv);
1375 lock_rx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001376
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001377 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001378 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001379 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001380
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001381 gfar_start(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001382
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001383 unlock_rx_qs(priv);
1384 unlock_tx_qs(priv);
1385 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001386
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001387 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001388
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001389 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001390
1391 return 0;
1392}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001393
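/* Hibernation restore path: unlike gfar_resume(), the BD rings and
 * registers may be stale once the image has been loaded, so they are
 * rebuilt from scratch below before the controller is restarted.
 */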
1394static int gfar_restore(struct device *dev)
1395{
1396 struct gfar_private *priv = dev_get_drvdata(dev);
1397 struct net_device *ndev = priv->ndev;
1398
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001399 if (!netif_running(ndev)) {
1400 netif_device_attach(ndev);
1401
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001402 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001403 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001404
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001405 if (gfar_init_bds(ndev)) {
1406 free_skb_resources(priv);
1407 return -ENOMEM;
1408 }
1409
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001410 init_registers(ndev);
1411 gfar_set_mac_address(ndev);
1412 gfar_init_mac(ndev);
1413 gfar_start(ndev);
1414
1415 priv->oldlink = 0;
1416 priv->oldspeed = 0;
1417 priv->oldduplex = -1;
1418
1419 if (priv->phydev)
1420 phy_start(priv->phydev);
1421
1422 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001423 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001424
1425 return 0;
1426}
1427
1428static struct dev_pm_ops gfar_pm_ops = {
1429 .suspend = gfar_suspend,
1430 .resume = gfar_resume,
1431 .freeze = gfar_suspend,
1432 .thaw = gfar_resume,
1433 .restore = gfar_restore,
1434};
1435
1436#define GFAR_PM_OPS (&gfar_pm_ops)
1437
Scott Woodd87eb122008-07-11 18:04:45 -05001438#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001439
1440#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001441
Scott Woodd87eb122008-07-11 18:04:45 -05001442#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001444/* Reads the controller's registers to determine what interface
1445 * connects it to the PHY.
1446 */
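/* Decode order, as implemented below:
 *   ECNTRL_SGMII_MODE                        -> SGMII
 *   ECNTRL_TBI_MODE (+ ECNTRL_REDUCED_MODE)  -> TBI (RTBI)
 *   ECNTRL_REDUCED_MODE + REDUCED_MII_MODE   -> RMII
 *   ECNTRL_REDUCED_MODE otherwise            -> RGMII / RGMII_ID
 *   FSL_GIANFAR_DEV_HAS_GIGABIT              -> GMII, else MII
 */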
1447static phy_interface_t gfar_get_interface(struct net_device *dev)
1448{
1449 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001450 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001451 u32 ecntrl;
1452
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001453 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001454
1455 if (ecntrl & ECNTRL_SGMII_MODE)
1456 return PHY_INTERFACE_MODE_SGMII;
1457
1458 if (ecntrl & ECNTRL_TBI_MODE) {
1459 if (ecntrl & ECNTRL_REDUCED_MODE)
1460 return PHY_INTERFACE_MODE_RTBI;
1461 else
1462 return PHY_INTERFACE_MODE_TBI;
1463 }
1464
1465 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001466 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001467 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001468		} else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001470 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001471
Jan Ceuleers0977f812012-06-05 03:42:12 +00001472 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001473 * be set by the device tree or platform code.
1474 */
1475 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1476 return PHY_INTERFACE_MODE_RGMII_ID;
1477
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001478 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001479 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001480 }
1481
Andy Flemingb31a1d82008-12-16 15:29:15 -08001482 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001483 return PHY_INTERFACE_MODE_GMII;
1484
1485 return PHY_INTERFACE_MODE_MII;
1486}
1487
1488
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001489/* Initializes driver's PHY state, and attaches to the PHY.
1490 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 */
1492static int init_phy(struct net_device *dev)
1493{
1494 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001495 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001496 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001497 GFAR_SUPPORTED_GBIT : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001498 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
1500 priv->oldlink = 0;
1501 priv->oldspeed = 0;
1502 priv->oldduplex = -1;
1503
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001504 interface = gfar_get_interface(dev);
1505
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001506 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1507 interface);
1508 if (!priv->phydev)
1509 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1510 interface);
1511 if (!priv->phydev) {
1512 dev_err(&dev->dev, "could not attach to PHY\n");
1513 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001514 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515
Kapil Junejad3c12872007-05-11 18:25:11 -05001516 if (interface == PHY_INTERFACE_MODE_SGMII)
1517 gfar_configure_serdes(dev);
1518
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001519 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001520 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1521 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
1523 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524}
1525
Jan Ceuleers0977f812012-06-05 03:42:12 +00001526/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001527 * SERDES lynx PHY on the chip. We communicate with this PHY
1528 * through the MDIO bus on each controller, treating it as a
1529 * "normal" PHY at the address found in the TBIPA register. We assume
1530 * that the TBIPA register is valid. Either the MDIO bus code will set
1531 * it to a value that doesn't conflict with other PHYs on the bus, or the
1532 * value doesn't matter, as there are no other PHYs on the bus.
1533 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001534static void gfar_configure_serdes(struct net_device *dev)
1535{
1536 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001537 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001538
Grant Likelyfe192a42009-04-25 12:53:12 +00001539 if (!priv->tbi_node) {
1540 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1541 "device tree specify a tbi-handle\n");
1542 return;
1543 }
1544
1545 tbiphy = of_phy_find_device(priv->tbi_node);
1546 if (!tbiphy) {
1547 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001548 return;
1549 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001550
Jan Ceuleers0977f812012-06-05 03:42:12 +00001551 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001552 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1553 * everything for us? Resetting it takes the link down and requires
1554 * several seconds for it to come back.
1555 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001556 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001557 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001558
Paul Gortmakerd0313582008-04-17 00:08:10 -04001559	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001560 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001561
Grant Likelyfe192a42009-04-25 12:53:12 +00001562 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001563 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1564 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001565
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001566 phy_write(tbiphy, MII_BMCR,
1567 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1568 BMCR_SPEED1000);
Kapil Junejad3c12872007-05-11 18:25:11 -05001569}
1570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571static void init_registers(struct net_device *dev)
1572{
1573 struct gfar_private *priv = netdev_priv(dev);
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001574 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001576 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 /* Init hash registers to zero */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001579 gfar_write(&regs->igaddr0, 0);
1580 gfar_write(&regs->igaddr1, 0);
1581 gfar_write(&regs->igaddr2, 0);
1582 gfar_write(&regs->igaddr3, 0);
1583 gfar_write(&regs->igaddr4, 0);
1584 gfar_write(&regs->igaddr5, 0);
1585 gfar_write(&regs->igaddr6, 0);
1586 gfar_write(&regs->igaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001588 gfar_write(&regs->gaddr0, 0);
1589 gfar_write(&regs->gaddr1, 0);
1590 gfar_write(&regs->gaddr2, 0);
1591 gfar_write(&regs->gaddr3, 0);
1592 gfar_write(&regs->gaddr4, 0);
1593 gfar_write(&regs->gaddr5, 0);
1594 gfar_write(&regs->gaddr6, 0);
1595 gfar_write(&regs->gaddr7, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 /* Zero out the rmon mib registers if it has them */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001598 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001599		memset_io(&regs->rmon, 0, sizeof(struct rmon_mib));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600
1601 /* Mask off the CAM interrupts */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001602 gfar_write(&regs->rmon.cam1, 0xffffffff);
1603 gfar_write(&regs->rmon.cam2, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 }
1605
1606 /* Initialize the max receive buffer length */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001607 gfar_write(&regs->mrblr, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 /* Initialize the Minimum Frame Length Register */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001610 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611}
1612
Anton Vorontsov511d9342010-06-30 06:39:15 +00001613static int __gfar_is_rx_idle(struct gfar_private *priv)
1614{
1615 u32 res;
1616
Jan Ceuleers0977f812012-06-05 03:42:12 +00001617	/* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001618 * actually wait for IEVENT_GRSC flag.
1619 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001620 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001621 return 0;
1622
Jan Ceuleers0977f812012-06-05 03:42:12 +00001623 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001624 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1625 * and the Rx can be safely reset.
1626 */
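	/* The 0x7f807f80 mask keeps the same 8-bit field in each 16-bit
	 * half of the register; the comparison below then checks that
	 * the field in the upper half matches the one in the lower half.
	 */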
1627 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1628 res &= 0x7f807f80;
1629 if ((res & 0xffff) == (res >> 16))
1630 return 1;
1631
1632 return 0;
1633}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001634
1635/* Halt the receive and transmit queues */
Scott Woodd87eb122008-07-11 18:04:45 -05001636static void gfar_halt_nodisable(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637{
1638 struct gfar_private *priv = netdev_priv(dev);
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001639 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 u32 tempval;
1641
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001642 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001645 tempval = gfar_read(&regs->dmactrl);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001646 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1647 (DMACTRL_GRS | DMACTRL_GTS)) {
Anton Vorontsov511d9342010-06-30 06:39:15 +00001648 int ret;
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001651 gfar_write(&regs->dmactrl, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
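		/* Wait until either GRSC/GTSC assert, or the A002 idle
		 * check confirms the Rx side is quiescent even though GRSC
		 * never asserted.
		 */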
Anton Vorontsov511d9342010-06-30 06:39:15 +00001653 do {
1654 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1655 (IEVENT_GRSC | IEVENT_GTSC)) ==
1656 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1657 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1658 ret = __gfar_is_rx_idle(priv);
1659 } while (!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 }
Scott Woodd87eb122008-07-11 18:04:45 -05001661}
Scott Woodd87eb122008-07-11 18:04:45 -05001662
1663/* Halt the receive and transmit queues */
1664void gfar_halt(struct net_device *dev)
1665{
1666 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001667 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001668 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Scott Wood2a54adc2008-08-12 15:10:46 -05001670 gfar_halt_nodisable(dev);
1671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 /* Disable Rx and Tx */
1673 tempval = gfar_read(&regs->maccfg1);
1674 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1675 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001676}
1677
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001678static void free_grp_irqs(struct gfar_priv_grp *grp)
1679{
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001680 free_irq(gfar_irq(grp, TX)->irq, grp);
1681 free_irq(gfar_irq(grp, RX)->irq, grp);
1682 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001683}
1684
Kumar Gala0bbaf062005-06-20 10:54:21 -05001685void stop_gfar(struct net_device *dev)
1686{
1687 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001688 unsigned long flags;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001689 int i;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001690
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001691 phy_stop(priv->phydev);
1692
Kumar Gala0bbaf062005-06-20 10:54:21 -05001694 /* Lock it down */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001695 local_irq_save(flags);
1696 lock_tx_qs(priv);
1697 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001698
Kumar Gala0bbaf062005-06-20 10:54:21 -05001699 gfar_halt(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001701 unlock_rx_qs(priv);
1702 unlock_tx_qs(priv);
1703 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
1705 /* Free the IRQs */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001706 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001707 for (i = 0; i < priv->num_grps; i++)
1708 free_grp_irqs(&priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001710 for (i = 0; i < priv->num_grps; i++)
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001711 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001712 &priv->gfargrp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 }
1714
1715 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716}
1717
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001718static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001721 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001722 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001724 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001726 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1727 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001728 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Claudiu Manoil369ec162013-02-14 05:00:02 +00001730 dma_unmap_single(priv->dev, txbdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001731 txbdp->length, DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001732 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001733 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001734 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001735 txbdp++;
Claudiu Manoil369ec162013-02-14 05:00:02 +00001736 dma_unmap_page(priv->dev, txbdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001737 txbdp->length, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001739 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001740 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1741 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001743 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001744 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001745}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001747static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1748{
1749 struct rxbd8 *rxbdp;
1750 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1751 int i;
1752
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001753 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001755 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1756 if (rx_queue->rx_skbuff[i]) {
Claudiu Manoil369ec162013-02-14 05:00:02 +00001757 dma_unmap_single(priv->dev, rxbdp->bufPtr,
1758 priv->rx_buffer_size,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001759 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001760 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1761 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001763 rxbdp->lstatus = 0;
1764 rxbdp->bufPtr = 0;
1765 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001767 kfree(rx_queue->rx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001768 rx_queue->rx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001769}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001770
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001771/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00001772 * Then free tx_skbuff and rx_skbuff
1773 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001774static void free_skb_resources(struct gfar_private *priv)
1775{
1776 struct gfar_priv_tx_q *tx_queue = NULL;
1777 struct gfar_priv_rx_q *rx_queue = NULL;
1778 int i;
1779
1780 /* Go through all the buffer descriptors and free their data buffers */
1781 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001782 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001783
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001784 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001785 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001786 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001787 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001788 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001789 }
1790
1791 for (i = 0; i < priv->num_rx_queues; i++) {
1792 rx_queue = priv->rx_queue[i];
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001793 if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001794 free_skb_rx_queue(rx_queue);
1795 }
1796
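	/* The Tx and Rx BD rings share a single coherent DMA allocation,
	 * anchored at tx_queue[0], so one free releases them all.
	 */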
Claudiu Manoil369ec162013-02-14 05:00:02 +00001797 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001798 sizeof(struct txbd8) * priv->total_tx_ring_size +
1799 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1800 priv->tx_queue[0]->tx_bd_base,
1801 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
1803
Kumar Gala0bbaf062005-06-20 10:54:21 -05001804void gfar_start(struct net_device *dev)
1805{
1806 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001807 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001808 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001809 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001810
1811 /* Enable Rx and Tx in MACCFG1 */
1812 tempval = gfar_read(&regs->maccfg1);
1813 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1814 gfar_write(&regs->maccfg1, tempval);
1815
1816 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001817 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001818 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001819 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001820
Kumar Gala0bbaf062005-06-20 10:54:21 -05001821 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001822 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001823 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001824 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001825
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001826 for (i = 0; i < priv->num_grps; i++) {
1827 regs = priv->gfargrp[i].regs;
1828 /* Clear THLT/RHLT, so that the DMA starts polling now */
1829 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1830 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001831 }
Dai Haruki12dea572008-12-16 15:30:20 -08001832
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001833 gfar_ints_enable(priv);
1834
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001835 dev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001836}
1837
Claudiu Manoil800c6442013-03-19 07:40:05 +00001838static void gfar_configure_coalescing(struct gfar_private *priv,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001839 unsigned long tx_mask, unsigned long rx_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001841 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001842 u32 __iomem *baddr;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001843
1844 if (priv->mode == MQ_MG_MODE) {
Claudiu Manoil5d9657d2013-03-19 07:40:04 +00001845 int i = 0;
Claudiu Manoilc6e11602013-03-21 03:12:14 +00001846
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001847 baddr = &regs->txic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001848 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
Claudiu Manoil9740e002012-06-28 04:40:53 +00001849 gfar_write(baddr + i, 0);
1850 if (likely(priv->tx_queue[i]->txcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001851 gfar_write(baddr + i, priv->tx_queue[i]->txic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001852 }
1853
1854 baddr = &regs->rxic0;
Akinobu Mita984b3f52010-03-05 13:41:37 -08001855 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
Claudiu Manoil9740e002012-06-28 04:40:53 +00001856 gfar_write(baddr + i, 0);
1857 if (likely(priv->rx_queue[i]->rxcoalescing))
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001858 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001859 }
Claudiu Manoil5d9657d2013-03-19 07:40:04 +00001860 } else {
Claudiu Manoilc6e11602013-03-21 03:12:14 +00001861 /* Backward compatible case -- even if we enable
Claudiu Manoil5d9657d2013-03-19 07:40:04 +00001862		 * multiple queues, there's only a single reg to program
1863 */
1864 gfar_write(&regs->txic, 0);
1865 if (likely(priv->tx_queue[0]->txcoalescing))
1866 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1867
1868 gfar_write(&regs->rxic, 0);
1869 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1870 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001871 }
1872}
1873
Claudiu Manoil800c6442013-03-19 07:40:05 +00001874void gfar_configure_coalescing_all(struct gfar_private *priv)
1875{
1876 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1877}
1878
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001879static int register_grp_irqs(struct gfar_priv_grp *grp)
1880{
1881 struct gfar_private *priv = grp->priv;
1882 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001883 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00001886 * them. Otherwise, only register for the one
1887 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08001888 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001889 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00001890 * Transmit, and Receive
1891 */
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001892 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1893 gfar_irq(grp, ER)->name, grp);
1894 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001895 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001896 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001897
Julia Lawall2145f1a2010-08-05 10:26:20 +00001898 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001900 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1901 gfar_irq(grp, TX)->name, grp);
1902 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001903 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001904 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 goto tx_irq_fail;
1906 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001907 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1908 gfar_irq(grp, RX)->name, grp);
1909 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001910 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001911 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 goto rx_irq_fail;
1913 }
1914 } else {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001915 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1916 gfar_irq(grp, TX)->name, grp);
1917 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00001918 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001919 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 goto err_irq_fail;
1921 }
1922 }
1923
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001924 return 0;
1925
1926rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001927 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001928tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001929 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001930err_irq_fail:
1931 return err;
1932
1933}
1934
1935/* Bring the controller up and running */
1936int startup_gfar(struct net_device *ndev)
1937{
1938 struct gfar_private *priv = netdev_priv(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001939 int err, i, j;
1940
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001941 gfar_ints_disable(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001942
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001943 err = gfar_alloc_skb_resources(ndev);
1944 if (err)
1945 return err;
1946
1947 gfar_init_mac(ndev);
1948
1949 for (i = 0; i < priv->num_grps; i++) {
1950 err = register_grp_irqs(&priv->gfargrp[i]);
1951 if (err) {
1952 for (j = 0; j < i; j++)
1953 free_grp_irqs(&priv->gfargrp[j]);
Anton Vorontsovff760152011-01-18 02:36:02 +00001954 goto irq_fail;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001955 }
1956 }
1957
Andy Fleming7f7f5312005-11-11 12:38:59 -06001958 /* Start the controller */
Anton Vorontsovccc05c62009-10-12 06:00:26 +00001959 gfar_start(ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00001961 phy_start(priv->phydev);
1962
Claudiu Manoil800c6442013-03-19 07:40:05 +00001963 gfar_configure_coalescing_all(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001964
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 return 0;
1966
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001967irq_fail:
Anton Vorontsove69edd22009-10-12 06:00:30 +00001968 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 return err;
1970}
1971
Jan Ceuleers0977f812012-06-05 03:42:12 +00001972/* Called when something needs to use the ethernet device
1973 * Returns 0 for success.
1974 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975static int gfar_enet_open(struct net_device *dev)
1976{
Li Yang94e8cc32007-10-12 21:53:51 +08001977 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 int err;
1979
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001980 enable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001981
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 /* Initialize a bunch of registers */
1983 init_registers(dev);
1984
1985 gfar_set_mac_address(dev);
1986
1987 err = init_phy(dev);
1988
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001989 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001990 disable_napi(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 return err;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
1994 err = startup_gfar(dev);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001995 if (err) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001996 disable_napi(priv);
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04001997 return err;
1998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002000 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08002002 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2003
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 return err;
2005}
2006
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002007static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002008{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002009 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002010
2011 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002012
Kumar Gala0bbaf062005-06-20 10:54:21 -05002013 return fcb;
2014}
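/* Note: gfar_add_fcb() relies on headroom reserved beforehand, either
 * via dev->needed_headroom at probe time or the skb_realloc_headroom()
 * fallback in gfar_start_xmit(); skb_push() moves skb->data back by
 * GMAC_FCB_LEN so the FCB sits directly in front of the frame data for
 * the controller to consume.
 */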
2015
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002016static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002017 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002018{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002019	/* If we're here, it's an IP packet with a TCP or UDP
2020 * payload. We set it to checksum, using a pseudo-header
2021 * we provide
2022 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002023 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002024
Jan Ceuleers0977f812012-06-05 03:42:12 +00002025 /* Tell the controller what the protocol is
2026	 * and provide the already-calculated phcs
2027 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002028 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002029 flags |= TXFCB_UDP;
Arnaldo Carvalho de Melo4bedb452007-03-13 14:28:48 -03002030 fcb->phcs = udp_hdr(skb)->check;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002031 } else
Kumar Gala8da32de2007-06-29 00:12:04 -05002032 fcb->phcs = tcp_hdr(skb)->check;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002033
2034 /* l3os is the distance between the start of the
2035 * frame (skb->data) and the start of the IP hdr.
2036 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002037 * l3 hdr and the l4 hdr
2038 */
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002039 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002040 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002041
Andy Fleming7f7f5312005-11-11 12:38:59 -06002042 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002043}
2044
Andy Fleming7f7f5312005-11-11 12:38:59 -06002045inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002046{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002047 fcb->flags |= TXFCB_VLN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002048 fcb->vlctl = vlan_tx_tag_get(skb);
2049}
2050
Dai Haruki4669bc92008-12-17 16:51:04 -08002051static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002052 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002053{
2054 struct txbd8 *new_bd = bdp + stride;
2055
2056 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2057}
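/* e.g. with ring_size 8, skipping 3 BDs from index 6 wraps to index 1:
 * new_bd = base + 9 >= base + 8, so base + 9 - 8 = base + 1 is returned.
 */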
2058
2059static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002060 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002061{
2062 return skip_txbd(bdp, 1, base, ring_size);
2063}
2064
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002065/* eTSEC12: csum generation not supported for some fcb offsets */
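/* Concretely, the check below flags FCB addresses whose offset within a
 * 32-byte window falls in 0x19-0x1f, i.e. (fcb_addr % 0x20) > 0x18.
 */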
2066static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2067 unsigned long fcb_addr)
2068{
2069 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2070 (fcb_addr % 0x20) > 0x18);
2071}
2072
2073/* eTSEC76: csum generation for frames larger than 2500 may
2074 * cause excess delays before start of transmission
2075 */
2076static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2077 unsigned int len)
2078{
2079 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2080 (len > 2500));
2081}
2082
Jan Ceuleers0977f812012-06-05 03:42:12 +00002083/* This is called by the kernel when a frame is ready for transmission.
2084 * It is hooked up via the ndo_start_xmit field of the netdev_ops struct
2085 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2087{
2088 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002089 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002090 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002091 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002092 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002093 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002094 u32 lstatus;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002095 int i, rq = 0;
2096 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002097 u32 bufaddr;
Andy Flemingfef61082006-04-20 16:44:29 -05002098 unsigned long flags;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002099 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002100
2101 rq = skb->queue_mapping;
2102 tx_queue = priv->tx_queue[rq];
2103 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002104 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002105 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002106
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002107 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2108 do_vlan = vlan_tx_tag_present(skb);
2109 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2110 priv->hwts_tx_en;
2111
2112 if (do_csum || do_vlan)
2113 fcb_len = GMAC_FCB_LEN;
2114
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002115 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002116 if (unlikely(do_tstamp))
2117 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002118
Li Yang5b28bea2009-03-27 15:54:30 -07002119 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002120 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002121 struct sk_buff *skb_new;
2122
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002123 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002124 if (!skb_new) {
2125 dev->stats.tx_errors++;
David S. Millerbd14ba82009-03-27 01:10:58 -07002126 kfree_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002127 return NETDEV_TX_OK;
2128 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002129
Eric Dumazet313b0372012-07-05 11:45:13 +00002130 if (skb->sk)
2131 skb_set_owner_w(skb_new, skb->sk);
2132 consume_skb(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002133 skb = skb_new;
2134 }
2135
Dai Haruki4669bc92008-12-17 16:51:04 -08002136 /* total number of fragments in the SKB */
2137 nr_frags = skb_shinfo(skb)->nr_frags;
2138
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002139 /* calculate the required number of TxBDs for this skb */
2140 if (unlikely(do_tstamp))
2141 nr_txbds = nr_frags + 2;
2142 else
2143 nr_txbds = nr_frags + 1;
2144
Dai Haruki4669bc92008-12-17 16:51:04 -08002145 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002146 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002147 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002148 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002149 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002150 return NETDEV_TX_BUSY;
2151 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152
2153 /* Update transmit stats */
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002154 bytes_sent = skb->len;
2155 tx_queue->stats.tx_bytes += bytes_sent;
2156 /* keep Tx bytes on wire for BQL accounting */
2157 GFAR_CB(skb)->bytes_sent = bytes_sent;
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002158 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002160 txbdp = txbdp_start = tx_queue->cur_tx;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002161 lstatus = txbdp->lstatus;
2162
2163 /* Time stamp insertion requires one additional TxBD */
2164 if (unlikely(do_tstamp))
2165 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002166 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
Dai Haruki4669bc92008-12-17 16:51:04 -08002168 if (nr_frags == 0) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002169 if (unlikely(do_tstamp))
2170 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002171 TXBD_INTERRUPT);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002172 else
2173 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Dai Haruki4669bc92008-12-17 16:51:04 -08002174 } else {
2175 /* Place the fragment addresses and lengths into the TxBDs */
2176 for (i = 0; i < nr_frags; i++) {
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002177 unsigned int frag_len;
Dai Haruki4669bc92008-12-17 16:51:04 -08002178 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002179 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002181 frag_len = skb_shinfo(skb)->frags[i].size;
Dai Haruki4669bc92008-12-17 16:51:04 -08002182
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002183 lstatus = txbdp->lstatus | frag_len |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002184 BD_LFLAG(TXBD_READY);
Dai Haruki4669bc92008-12-17 16:51:04 -08002185
2186 /* Handle the last BD specially */
2187 if (i == nr_frags - 1)
2188 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2189
Claudiu Manoil369ec162013-02-14 05:00:02 +00002190 bufaddr = skb_frag_dma_map(priv->dev,
Ian Campbell2234a722011-08-29 23:18:29 +00002191 &skb_shinfo(skb)->frags[i],
2192 0,
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002193 frag_len,
Ian Campbell2234a722011-08-29 23:18:29 +00002194 DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08002195
2196 /* set the TxBD length and buffer pointer */
2197 txbdp->bufPtr = bufaddr;
2198 txbdp->lstatus = lstatus;
2199 }
2200
2201 lstatus = txbdp_start->lstatus;
2202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002204 /* Add TxPAL between FCB and frame if required */
2205 if (unlikely(do_tstamp)) {
2206 skb_push(skb, GMAC_TXPAL_LEN);
2207 memset(skb->data, 0, GMAC_TXPAL_LEN);
2208 }
2209
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002210 /* Add TxFCB if required */
2211 if (fcb_len) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002212 fcb = gfar_add_fcb(skb);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002213 lstatus |= BD_LFLAG(TXBD_TOE);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002214 }
2215
2216 /* Set up checksumming */
2217 if (do_csum) {
2218 gfar_tx_checksum(skb, fcb, fcb_len);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002219
2220 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2221 unlikely(gfar_csum_errata_76(priv, skb->len))) {
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002222 __skb_pull(skb, GMAC_FCB_LEN);
2223 skb_checksum_help(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002224 if (do_vlan || do_tstamp) {
2225 /* put back a new fcb for vlan/tstamp TOE */
2226 fcb = gfar_add_fcb(skb);
2227 } else {
2228 /* Tx TOE not used */
2229 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2230 fcb = NULL;
2231 }
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002232 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002233 }
2234
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002235 if (do_vlan)
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002236 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002237
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002238 /* Setup tx hardware time stamping if requested */
2239 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002240 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002241 fcb->ptp = 1;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002242 }
2243
Claudiu Manoil369ec162013-02-14 05:00:02 +00002244 txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002245 skb_headlen(skb), DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246
Jan Ceuleers0977f812012-06-05 03:42:12 +00002247 /* If time stamping is requested one additional TxBD must be set up. The
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002248 * first TxBD points to the FCB and must have a data length of
2249 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2250 * the full frame length.
2251 */
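	/* A sketch of the resulting layout:
	 *   txbdp_start  -> FCB (+ TxPAL pad), data length GMAC_FCB_LEN
	 *   txbdp_tstamp -> frame data at bufPtr + fcb_len,
	 *                   length skb_headlen(skb) - fcb_len
	 */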
2252 if (unlikely(do_tstamp)) {
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002253 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002254 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002255 (skb_headlen(skb) - fcb_len);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002256 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2257 } else {
2258 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002261 netdev_tx_sent_queue(txq, bytes_sent);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002262
Jan Ceuleers0977f812012-06-05 03:42:12 +00002263 /* We can work in parallel with gfar_clean_tx_ring(), except
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002264 * when modifying num_txbdfree. Note that we didn't grab the lock
2265 * when we were reading the num_txbdfree and checking for available
2266	 * space; that's because outside of this function it can only grow,
2267 * and once we've got needed space, it cannot suddenly disappear.
2268 *
2269 * The lock also protects us from gfar_error(), which can modify
2270 * regs->tstat and thus retrigger the transfers, which is why we
2271 * also must grab the lock before setting ready bit for the first
2272 * to be transmitted BD.
2273 */
2274 spin_lock_irqsave(&tx_queue->txlock, flags);
2275
Jan Ceuleers0977f812012-06-05 03:42:12 +00002276 /* The powerpc-specific eieio() is used, as wmb() has too strong
Scott Wood3b6330c2007-05-16 15:06:59 -05002277 * semantics (it requires synchronization between cacheable and
2278 * uncacheable mappings, which eieio doesn't provide and which we
2279 * don't need), thus requiring a more expensive sync instruction. At
2280 * some point, the set of architecture-independent barrier functions
2281 * should be expanded to include weaker barriers.
2282 */
Scott Wood3b6330c2007-05-16 15:06:59 -05002283 eieio();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002284
Dai Haruki4669bc92008-12-17 16:51:04 -08002285 txbdp_start->lstatus = lstatus;
2286
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002287 eieio(); /* force lstatus write before tx_skbuff */
2288
2289 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2290
Dai Haruki4669bc92008-12-17 16:51:04 -08002291 /* Update the current skb pointer to the next entry we will use
Jan Ceuleers0977f812012-06-05 03:42:12 +00002292 * (wrapping if necessary)
2293 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002294 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002295 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002296
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002297 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002298
2299 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002300 tx_queue->num_txbdfree -= (nr_txbds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
2302 /* If the next BD still needs to be cleaned up, then the bds
Jan Ceuleers0977f812012-06-05 03:42:12 +00002303 * are full. We need to tell the kernel to stop sending us stuff.
2304 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002305 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002306 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002308 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 }
2310
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002312 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
2314 /* Unlock priv */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002315 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002317 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318}
2319
2320/* Stops the kernel queue, and halts the controller */
2321static int gfar_close(struct net_device *dev)
2322{
2323 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002324
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002325 disable_napi(priv);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002326
Sebastian Siewiorab939902008-08-19 21:12:45 +02002327 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 stop_gfar(dev);
2329
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002330 /* Disconnect from the PHY */
2331 phy_disconnect(priv->phydev);
2332 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002334 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
2336 return 0;
2337}
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002340static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002342 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
2344 return 0;
2345}
2346
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002347/* Check if rx parser should be activated */
2348void gfar_check_rx_parser_mode(struct gfar_private *priv)
2349{
2350 struct gfar __iomem *regs;
2351 u32 tempval;
2352
2353 regs = priv->gfargrp[0].regs;
2354
2355 tempval = gfar_read(&regs->rctrl);
 2356	/* Enable the parser if any feature requires it; otherwise disable it */
Claudiu Manoilba779712013-02-14 05:00:07 +00002357 if (tempval & RCTRL_REQ_PARSER) {
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002358 tempval |= RCTRL_PRSDEP_INIT;
Claudiu Manoilba779712013-02-14 05:00:07 +00002359 priv->uses_rxfcb = 1;
2360 } else {
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002361 tempval &= ~RCTRL_PRSDEP_INIT;
Claudiu Manoilba779712013-02-14 05:00:07 +00002362 priv->uses_rxfcb = 0;
2363 }
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002364 gfar_write(&regs->rctrl, tempval);
2365}
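
/* Note (an assumption about the RCTRL bits used above): RCTRL_REQ_PARSER
 * groups the feature bits that need the hardware parser, e.g. VLAN
 * extraction or RX checksum offload.  When any of them is set, every
 * received frame is prefixed with an 8-byte Frame Control Block, which
 * is why uses_rxfcb is tracked here and honoured in gfar_process_frame().
 */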
2366
Kumar Gala0bbaf062005-06-20 10:54:21 -05002367/* Enables and disables VLAN insertion/extraction */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002368void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002369{
2370 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002371 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002372 unsigned long flags;
2373 u32 tempval;
2374
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002375 regs = priv->gfargrp[0].regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002376 local_irq_save(flags);
2377 lock_rx_qs(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002378
Patrick McHardyf6469682013-04-19 02:04:27 +00002379 if (features & NETIF_F_HW_VLAN_CTAG_TX) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002380 /* Enable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002381 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002382 tempval |= TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002383 gfar_write(&regs->tctrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002384 } else {
2385 /* Disable VLAN tag insertion */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002386 tempval = gfar_read(&regs->tctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002387 tempval &= ~TCTRL_VLINS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002388 gfar_write(&regs->tctrl, tempval);
Jiri Pirko87c288c2011-07-20 04:54:19 +00002389 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002390
Patrick McHardyf6469682013-04-19 02:04:27 +00002391 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Jiri Pirko87c288c2011-07-20 04:54:19 +00002392 /* Enable VLAN tag extraction */
2393 tempval = gfar_read(&regs->rctrl);
2394 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2395 gfar_write(&regs->rctrl, tempval);
Claudiu Manoilba779712013-02-14 05:00:07 +00002396 priv->uses_rxfcb = 1;
Jiri Pirko87c288c2011-07-20 04:54:19 +00002397 } else {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002398 /* Disable VLAN tag extraction */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002399 tempval = gfar_read(&regs->rctrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002400 tempval &= ~RCTRL_VLEX;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002401 gfar_write(&regs->rctrl, tempval);
Sebastian Pöhnf3dc1582011-07-15 16:00:20 -07002402
2403 gfar_check_rx_parser_mode(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002404 }
2405
Dai Haruki77ecaf22008-12-16 15:30:48 -08002406 gfar_change_mtu(dev, dev->mtu);
2407
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002408 unlock_rx_qs(priv);
2409 local_irq_restore(flags);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002410}
2411
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2413{
2414 int tempsize, tempval;
2415 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002416 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002418 int frame_size = new_mtu + ETH_HLEN;
2419
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Joe Perches59deab22011-06-14 08:57:47 +00002421 netif_err(priv, drv, dev, "Invalid MTU setting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 return -EINVAL;
2423 }
2424
Claudiu Manoilba779712013-02-14 05:00:07 +00002425 if (priv->uses_rxfcb)
Dai Haruki77ecaf22008-12-16 15:30:48 -08002426 frame_size += GMAC_FCB_LEN;
2427
2428 frame_size += priv->padding;
2429
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002430 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2431 INCREMENTAL_BUFFER_SIZE;
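	/* Worked example (a sketch, assuming INCREMENTAL_BUFFER_SIZE == 512):
	 * new_mtu = 1500 gives frame_size = 1514, or 1522 with the 8-byte
	 * FCB; then (1522 & ~511) + 512 == 1024 + 512 == 1536, so rx
	 * buffers are always sized in whole 512-byte increments.
	 */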
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432
2433 /* Only stop and start the controller if it isn't already
Jan Ceuleers0977f812012-06-05 03:42:12 +00002434 * stopped, and we changed something
2435 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2437 stop_gfar(dev);
2438
2439 priv->rx_buffer_size = tempsize;
2440
2441 dev->mtu = new_mtu;
2442
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002443 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2444 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
2446 /* If the mtu is larger than the max size for standard
2447 * ethernet frames (ie, a jumbo frame), then set maccfg2
Jan Ceuleers0977f812012-06-05 03:42:12 +00002448	 * to allow huge frames and to enable length checking
2449 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002450 tempval = gfar_read(&regs->maccfg2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451
Anton Vorontsov7d350972010-06-30 06:39:12 +00002452 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002453 gfar_has_errata(priv, GFAR_ERRATA_74))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2455 else
2456 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2457
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002458 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459
2460 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2461 startup_gfar(dev);
2462
2463 return 0;
2464}
2465
Sebastian Siewiorab939902008-08-19 21:12:45 +02002466/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 * transmitted after a set amount of time.
2468 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002469 * starting over will fix the problem.
2470 */
2471static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002473 struct gfar_private *priv = container_of(work, struct gfar_private,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002474 reset_task);
Kumar Gala48268572009-03-18 23:28:22 -07002475 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
2477 if (dev->flags & IFF_UP) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002478 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 stop_gfar(dev);
2480 startup_gfar(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002481 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 }
2483
David S. Miller263ba322008-07-15 03:47:41 -07002484 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485}
2486
Sebastian Siewiorab939902008-08-19 21:12:45 +02002487static void gfar_timeout(struct net_device *dev)
2488{
2489 struct gfar_private *priv = netdev_priv(dev);
2490
2491 dev->stats.tx_errors++;
2492 schedule_work(&priv->reset_task);
2493}
2494
Eran Libertyacbc0f02010-07-07 15:54:54 -07002495static void gfar_align_skb(struct sk_buff *skb)
2496{
 2497	/* The data buffer must be properly aligned. Reserve as many
 2498	 * bytes as needed to achieve that alignment
2499 */
2500 skb_reserve(skb, RXBUF_ALIGNMENT -
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002501 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
Eran Libertyacbc0f02010-07-07 15:54:54 -07002502}
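
/* For instance (assuming RXBUF_ALIGNMENT == 64): if skb->data % 64 == 10,
 * the skb_reserve() above shifts the data pointer by 64 - 10 = 54 bytes
 * so the payload starts on a 64-byte boundary.
 */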
2503
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504/* Reclaims completed Tx descriptors; runs from NAPI poll, not hard IRQ */
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002505static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002507 struct net_device *dev = tx_queue->dev;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002508 struct netdev_queue *txq;
Dai Harukid080cd62008-04-09 19:37:51 -05002509 struct gfar_private *priv = netdev_priv(dev);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002510 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002511 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002512 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002513 struct sk_buff *skb;
2514 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002515 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002516 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002517 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002518 int howmany = 0;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002519 int tqi = tx_queue->qindex;
2520 unsigned int bytes_sent = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002521 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002522 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002524 txq = netdev_get_tx_queue(dev, tqi);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002525 bdp = tx_queue->dirty_tx;
2526 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002527
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002528 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002529 unsigned long flags;
2530
Dai Haruki4669bc92008-12-17 16:51:04 -08002531 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002532
Jan Ceuleers0977f812012-06-05 03:42:12 +00002533 /* When time stamping, one additional TxBD must be freed.
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002534 * Also, we need to dma_unmap_single() the TxPAL.
2535 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002536 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002537 nr_txbds = frags + 2;
2538 else
2539 nr_txbds = frags + 1;
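		/* e.g. a 3-fragment skb spans 4 BDs (1 + frags), or 5 while
		 * a TX timestamp is in progress, since the FCB/TxPAL header
		 * then occupies a BD of its own.
		 */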
2540
2541 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002542
2543 lstatus = lbdp->lstatus;
2544
2545 /* Only clean completed frames */
2546 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002547 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 break;
2549
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002550 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002551 next = next_txbd(bdp, base, tx_ring_size);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002552 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002553 } else
2554 buflen = bdp->length;
2555
Claudiu Manoil369ec162013-02-14 05:00:02 +00002556 dma_unmap_single(priv->dev, bdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002557 buflen, DMA_TO_DEVICE);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002558
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002559 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002560 struct skb_shared_hwtstamps shhwtstamps;
2561 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002562
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002563 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2564 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002565 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002566 skb_tstamp_tx(skb, &shhwtstamps);
2567 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2568 bdp = next;
2569 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002570
2571 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2572 bdp = next_txbd(bdp, base, tx_ring_size);
2573
2574 for (i = 0; i < frags; i++) {
Claudiu Manoil369ec162013-02-14 05:00:02 +00002575 dma_unmap_page(priv->dev, bdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002576 bdp->length, DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08002577 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2578 bdp = next_txbd(bdp, base, tx_ring_size);
2579 }
2580
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002581 bytes_sent += GFAR_CB(skb)->bytes_sent;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002582
Eric Dumazetacb600d2012-10-05 06:23:55 +00002583 dev_kfree_skb_any(skb);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002584
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002585 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002586
2587 skb_dirtytx = (skb_dirtytx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002588 TX_RING_MOD_MASK(tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002589
Dai Harukid080cd62008-04-09 19:37:51 -05002590 howmany++;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002591 spin_lock_irqsave(&tx_queue->txlock, flags);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002592 tx_queue->num_txbdfree += nr_txbds;
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002593 spin_unlock_irqrestore(&tx_queue->txlock, flags);
Dai Haruki4669bc92008-12-17 16:51:04 -08002594 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595
Dai Haruki4669bc92008-12-17 16:51:04 -08002596 /* If we freed a buffer, we can restart transmission, if necessary */
Paul Gortmaker5407b14c2012-03-18 17:11:22 -04002597 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002598 netif_wake_subqueue(dev, tqi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599
Dai Haruki4669bc92008-12-17 16:51:04 -08002600 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002601 tx_queue->skb_dirtytx = skb_dirtytx;
2602 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002604 netdev_tx_completed_queue(txq, howmany, bytes_sent);
Dai Harukid080cd62008-04-09 19:37:51 -05002605}
2606
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002607static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
Dai Haruki8c7396a2008-12-17 16:52:00 -08002608{
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002609 unsigned long flags;
2610
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002611 spin_lock_irqsave(&gfargrp->grplock, flags);
2612 if (napi_schedule_prep(&gfargrp->napi)) {
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002613 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002614 __napi_schedule(&gfargrp->napi);
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002615 } else {
Jan Ceuleers0977f812012-06-05 03:42:12 +00002616 /* Clear IEVENT, so interrupts aren't called again
Jarek Poplawski8707bdd2009-02-09 14:59:30 -08002617 * because of the packets that have already arrived.
2618 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002619 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002620 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002621 spin_unlock_irqrestore(&gfargrp->grplock, flags);
Anton Vorontsova6d0b912009-01-12 21:57:34 -08002622
Dai Haruki8c7396a2008-12-17 16:52:00 -08002623}
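
/* This is the usual NAPI hand-off: the first interrupt masks the RX/TX
 * sources (IMASK_RTX_DISABLED) and schedules the poll routine; once the
 * ring work is done, gfar_poll()/gfar_poll_sq() restore IMASK_DEFAULT,
 * so no further per-packet interrupts fire while polling is active.
 */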
2624
Dai Harukid080cd62008-04-09 19:37:51 -05002625/* Interrupt Handler for Transmit complete */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002626static irqreturn_t gfar_transmit(int irq, void *grp_id)
Dai Harukid080cd62008-04-09 19:37:51 -05002627{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002628 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 return IRQ_HANDLED;
2630}
2631
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002632static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002633 struct sk_buff *skb)
Andy Fleming815b97c2008-04-22 17:18:29 -05002634{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002635 struct net_device *dev = rx_queue->dev;
Andy Fleming815b97c2008-04-22 17:18:29 -05002636 struct gfar_private *priv = netdev_priv(dev);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002637 dma_addr_t buf;
Andy Fleming815b97c2008-04-22 17:18:29 -05002638
Claudiu Manoil369ec162013-02-14 05:00:02 +00002639 buf = dma_map_single(priv->dev, skb->data,
Anton Vorontsov8a102fe2009-10-12 06:00:37 +00002640 priv->rx_buffer_size, DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002641 gfar_init_rxbdp(rx_queue, bdp, buf);
Andy Fleming815b97c2008-04-22 17:18:29 -05002642}
2643
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00002644static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002645{
2646 struct gfar_private *priv = netdev_priv(dev);
Eric Dumazetacb600d2012-10-05 06:23:55 +00002647 struct sk_buff *skb;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002648
2649 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2650 if (!skb)
2651 return NULL;
2652
2653 gfar_align_skb(skb);
2654
2655 return skb;
2656}
Andy Fleming815b97c2008-04-22 17:18:29 -05002657
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00002658struct sk_buff *gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659{
Eric Dumazetacb600d2012-10-05 06:23:55 +00002660 return gfar_alloc_skb(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661}
2662
Li Yang298e1a92007-10-16 14:18:13 +08002663static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664{
Li Yang298e1a92007-10-16 14:18:13 +08002665 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002666 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 struct gfar_extra_stats *estats = &priv->extra_stats;
2668
Jan Ceuleers0977f812012-06-05 03:42:12 +00002669 /* If the packet was truncated, none of the other errors matter */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 if (status & RXBD_TRUNCATED) {
2671 stats->rx_length_errors++;
2672
Paul Gortmaker212079d2013-02-12 15:38:19 -05002673 atomic64_inc(&estats->rx_trunc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674
2675 return;
2676 }
2677 /* Count the errors, if there were any */
2678 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2679 stats->rx_length_errors++;
2680
2681 if (status & RXBD_LARGE)
Paul Gortmaker212079d2013-02-12 15:38:19 -05002682 atomic64_inc(&estats->rx_large);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683 else
Paul Gortmaker212079d2013-02-12 15:38:19 -05002684 atomic64_inc(&estats->rx_short);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 }
2686 if (status & RXBD_NONOCTET) {
2687 stats->rx_frame_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05002688 atomic64_inc(&estats->rx_nonoctet);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 }
2690 if (status & RXBD_CRCERR) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002691 atomic64_inc(&estats->rx_crcerr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 stats->rx_crc_errors++;
2693 }
2694 if (status & RXBD_OVERRUN) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002695 atomic64_inc(&estats->rx_overrun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 stats->rx_crc_errors++;
2697 }
2698}
2699
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002700irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002702 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 return IRQ_HANDLED;
2704}
2705
Kumar Gala0bbaf062005-06-20 10:54:21 -05002706static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2707{
2708 /* If valid headers were found, and valid sums
2709 * were verified, then we tell the kernel that no
Jan Ceuleers0977f812012-06-05 03:42:12 +00002710	 * checksumming is necessary. Otherwise, the stack must verify the checksum.
2711 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06002712 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05002713 skb->ip_summed = CHECKSUM_UNNECESSARY;
2714 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002715 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002716}
2717
2718
Jan Ceuleers0977f812012-06-05 03:42:12 +00002719/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
Claudiu Manoil61db26c2013-02-14 05:00:05 +00002720static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2721 int amount_pull, struct napi_struct *napi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722{
2723 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002724 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
Dai Haruki2c2db482008-12-16 15:31:15 -08002726 /* fcb is at the beginning if exists */
2727 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728
Jan Ceuleers0977f812012-06-05 03:42:12 +00002729 /* Remove the FCB from the skb
2730 * Remove the padded bytes, if there are any
2731 */
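	/* Buffer layout as delivered by the controller (a sketch):
	 *   [ RxFCB (8 bytes) | optional padding | Ethernet frame ]
	 * amount_pull is GMAC_FCB_LEN whenever uses_rxfcb is set, so the
	 * skb_pull() calls below strip the FCB and the padding in turn.
	 */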
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002732 if (amount_pull) {
2733 skb_record_rx_queue(skb, fcb->rq);
Dai Haruki2c2db482008-12-16 15:31:15 -08002734 skb_pull(skb, amount_pull);
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002735 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002736
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002737 /* Get receive timestamp from the skb */
2738 if (priv->hwts_rx_en) {
2739 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2740 u64 *ns = (u64 *) skb->data;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002741
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00002742 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2743 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2744 }
2745
2746 if (priv->padding)
2747 skb_pull(skb, priv->padding);
2748
Michał Mirosław8b3afe92011-04-15 04:50:50 +00002749 if (dev->features & NETIF_F_RXCSUM)
Dai Haruki2c2db482008-12-16 15:31:15 -08002750 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002751
Dai Haruki2c2db482008-12-16 15:31:15 -08002752 /* Tell the skb what kind of packet this is */
2753 skb->protocol = eth_type_trans(skb, dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002754
Patrick McHardyf6469682013-04-19 02:04:27 +00002755	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
David S. Miller823dcd22011-08-20 10:39:12 -07002756 * Even if vlan rx accel is disabled, on some chips
2757 * RXFCB_VLN is pseudo randomly set.
2758 */
Patrick McHardyf6469682013-04-19 02:04:27 +00002759 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
David S. Miller823dcd22011-08-20 10:39:12 -07002760 fcb->flags & RXFCB_VLN)
David S. Millere5905c82013-04-22 19:24:19 -04002761 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
Jiri Pirko87c288c2011-07-20 04:54:19 +00002762
Dai Haruki2c2db482008-12-16 15:31:15 -08002763 /* Send the packet up the stack */
Claudiu Manoil953d2762013-03-21 03:12:15 +00002764 napi_gro_receive(napi, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766}
2767
2768/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00002769 * until the budget/quota has been reached. Returns the number
2770 * of frames handled
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002772int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002774 struct net_device *dev = rx_queue->dev;
Andy Fleming31de1982008-12-16 15:33:40 -08002775 struct rxbd8 *bdp, *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 struct sk_buff *skb;
Dai Haruki2c2db482008-12-16 15:31:15 -08002777 int pkt_len;
2778 int amount_pull;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 int howmany = 0;
2780 struct gfar_private *priv = netdev_priv(dev);
2781
2782 /* Get the first full descriptor */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002783 bdp = rx_queue->cur_rx;
2784 base = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785
Claudiu Manoilba779712013-02-14 05:00:07 +00002786 amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
Dai Haruki2c2db482008-12-16 15:31:15 -08002787
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002789 struct sk_buff *newskb;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002790
Scott Wood3b6330c2007-05-16 15:06:59 -05002791 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05002792
2793 /* Add another skb for the future */
2794 newskb = gfar_new_skb(dev);
2795
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002796 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
Claudiu Manoil369ec162013-02-14 05:00:02 +00002798 dma_unmap_single(priv->dev, bdp->bufPtr,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002799 priv->rx_buffer_size, DMA_FROM_DEVICE);
Andy Fleming81183052008-11-12 10:07:11 -06002800
Anton Vorontsov63b88b92010-06-11 10:51:03 +00002801 if (unlikely(!(bdp->status & RXBD_ERR) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002802 bdp->length > priv->rx_buffer_size))
Anton Vorontsov63b88b92010-06-11 10:51:03 +00002803 bdp->status = RXBD_LARGE;
2804
Andy Fleming815b97c2008-04-22 17:18:29 -05002805 /* We drop the frame if we failed to allocate a new buffer */
2806 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002807 bdp->status & RXBD_ERR)) {
Andy Fleming815b97c2008-04-22 17:18:29 -05002808 count_errors(bdp->status, dev);
2809
2810 if (unlikely(!newskb))
2811 newskb = skb;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002812 else if (skb)
Eric Dumazetacb600d2012-10-05 06:23:55 +00002813 dev_kfree_skb(skb);
Andy Fleming815b97c2008-04-22 17:18:29 -05002814 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 /* Increment the number of packets */
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002816 rx_queue->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 howmany++;
2818
Dai Haruki2c2db482008-12-16 15:31:15 -08002819 if (likely(skb)) {
2820 pkt_len = bdp->length - ETH_FCS_LEN;
2821 /* Remove the FCS from the packet length */
2822 skb_put(skb, pkt_len);
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002823 rx_queue->stats.rx_bytes += pkt_len;
Sandeep Gopalpetf74dac02009-12-24 03:13:06 +00002824 skb_record_rx_queue(skb, rx_queue->qindex);
Wu Jiajun-B06378cd754a52012-04-19 22:54:35 +00002825 gfar_process_frame(dev, skb, amount_pull,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002826 &rx_queue->grp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Dai Haruki2c2db482008-12-16 15:31:15 -08002828 } else {
Joe Perches59deab22011-06-14 08:57:47 +00002829 netif_warn(priv, rx_err, dev, "Missing skb!\n");
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +00002830 rx_queue->stats.rx_dropped++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05002831 atomic64_inc(&priv->extra_stats.rx_skbmissing);
Dai Haruki2c2db482008-12-16 15:31:15 -08002832 }
2833
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 }
2835
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002836 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
Andy Fleming815b97c2008-04-22 17:18:29 -05002838 /* Setup the new bdp */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002839 gfar_new_rxbdp(rx_queue, bdp, newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840
2841 /* Update to the next pointer */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002842 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843
2844 /* update to point at the next skb */
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002845 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2846 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 }
2848
2849 /* Update the current rxbd pointer to be the next one */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002850 rx_queue->cur_rx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 return howmany;
2853}
2854
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03002855static int gfar_poll_sq(struct napi_struct *napi, int budget)
2856{
2857 struct gfar_priv_grp *gfargrp =
2858 container_of(napi, struct gfar_priv_grp, napi);
2859 struct gfar __iomem *regs = gfargrp->regs;
2860 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
2861 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
2862 int work_done = 0;
2863
2864 /* Clear IEVENT, so interrupts aren't called again
2865 * because of the packets that have already arrived
2866 */
2867 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2868
2869 /* run Tx cleanup to completion */
2870 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2871 gfar_clean_tx_ring(tx_queue);
2872
2873 work_done = gfar_clean_rx_ring(rx_queue, budget);
2874
2875 if (work_done < budget) {
2876 napi_complete(napi);
2877 /* Clear the halt bit in RSTAT */
2878 gfar_write(&regs->rstat, gfargrp->rstat);
2879
2880 gfar_write(&regs->imask, IMASK_DEFAULT);
2881
2882 /* If we are coalescing interrupts, update the timer
2883 * Otherwise, clear it
2884 */
2885 gfar_write(&regs->txic, 0);
2886 if (likely(tx_queue->txcoalescing))
2887 gfar_write(&regs->txic, tx_queue->txic);
2888
2889 gfar_write(&regs->rxic, 0);
2890 if (unlikely(rx_queue->rxcoalescing))
2891 gfar_write(&regs->rxic, rx_queue->rxic);
2892 }
2893
2894 return work_done;
2895}
2896
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002897static int gfar_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002899 struct gfar_priv_grp *gfargrp =
2900 container_of(napi, struct gfar_priv_grp, napi);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002901 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002902 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002903 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002904 struct gfar_priv_rx_q *rx_queue = NULL;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002905 int work_done = 0, work_done_per_q = 0;
Claudiu Manoil39c0a0d2013-03-21 03:12:13 +00002906 int i, budget_per_q = 0;
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002907 int has_tx_work = 0;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00002908 unsigned long rstat_rxf;
2909 int num_act_queues;
Dai Harukid080cd62008-04-09 19:37:51 -05002910
Dai Haruki8c7396a2008-12-17 16:52:00 -08002911 /* Clear IEVENT, so interrupts aren't called again
Jan Ceuleers0977f812012-06-05 03:42:12 +00002912 * because of the packets that have already arrived
2913 */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002914 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08002915
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00002916 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2917
2918 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2919 if (num_act_queues)
2920 budget_per_q = budget/num_act_queues;
2921
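	/* e.g. budget = 64 with two active RX queues yields budget_per_q = 32;
	 * the integer division may deliberately leave a small remainder of
	 * the budget unused.
	 */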
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002922 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2923 tx_queue = priv->tx_queue[i];
2924 /* run Tx cleanup to completion */
2925 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2926 gfar_clean_tx_ring(tx_queue);
2927 has_tx_work = 1;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002928 }
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002929 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002930
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002931 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2932 /* skip queue if not active */
2933 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2934 continue;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002935
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002936 rx_queue = priv->rx_queue[i];
2937 work_done_per_q =
2938 gfar_clean_rx_ring(rx_queue, budget_per_q);
2939 work_done += work_done_per_q;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002940
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002941 /* finished processing this queue */
2942 if (work_done_per_q < budget_per_q) {
2943 /* clear active queue hw indication */
2944 gfar_write(&regs->rstat,
2945 RSTAT_CLEAR_RXF0 >> i);
2946 num_act_queues--;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00002947
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002948 if (!num_act_queues)
2949 break;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002950 }
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002951 }
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002952
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002953 if (!num_act_queues && !has_tx_work) {
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002954
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002955 napi_complete(napi);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002956
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002957 /* Clear the halt bit in RSTAT */
2958 gfar_write(&regs->rstat, gfargrp->rstat);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002959
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002960 gfar_write(&regs->imask, IMASK_DEFAULT);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002961
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03002962 /* If we are coalescing interrupts, update the timer
2963 * Otherwise, clear it
2964 */
2965 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2966 gfargrp->tx_bit_map);
Dai Harukid080cd62008-04-09 19:37:51 -05002967 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002969 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002972#ifdef CONFIG_NET_POLL_CONTROLLER
Jan Ceuleers0977f812012-06-05 03:42:12 +00002973/* Polling 'interrupt' - used by things like netconsole to send skbs
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002974 * without having to re-enable interrupts. It's not called while
2975 * the interrupt routine is executing.
2976 */
2977static void gfar_netpoll(struct net_device *dev)
2978{
2979 struct gfar_private *priv = netdev_priv(dev);
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002980 int i;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002981
2982 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002983 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002984 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00002985 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2986
2987 disable_irq(gfar_irq(grp, TX)->irq);
2988 disable_irq(gfar_irq(grp, RX)->irq);
2989 disable_irq(gfar_irq(grp, ER)->irq);
2990 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2991 enable_irq(gfar_irq(grp, ER)->irq);
2992 enable_irq(gfar_irq(grp, RX)->irq);
2993 enable_irq(gfar_irq(grp, TX)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002994 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03002995 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002996 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00002997 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2998
2999 disable_irq(gfar_irq(grp, TX)->irq);
3000 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3001 enable_irq(gfar_irq(grp, TX)->irq);
Anton Vorontsov43de0042009-12-09 02:52:19 -08003002 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003003 }
3004}
3005#endif
3006
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003008static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003010 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
3012 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003013 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003016 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003017 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018
3019 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003020 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003021 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003023 /* Check for errors */
3024 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003025 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
3027 return IRQ_HANDLED;
3028}
3029
Claudiu Manoil23402bd2013-08-12 13:53:26 +03003030static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3031{
3032 struct phy_device *phydev = priv->phydev;
3033 u32 val = 0;
3034
3035 if (!phydev->duplex)
3036 return val;
3037
3038 if (!priv->pause_aneg_en) {
3039 if (priv->tx_pause_en)
3040 val |= MACCFG1_TX_FLOW;
3041 if (priv->rx_pause_en)
3042 val |= MACCFG1_RX_FLOW;
3043 } else {
3044 u16 lcl_adv, rmt_adv;
3045 u8 flowctrl;
3046 /* get link partner capabilities */
3047 rmt_adv = 0;
3048 if (phydev->pause)
3049 rmt_adv = LPA_PAUSE_CAP;
3050 if (phydev->asym_pause)
3051 rmt_adv |= LPA_PAUSE_ASYM;
3052
3053 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3054
3055 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3056 if (flowctrl & FLOW_CTRL_TX)
3057 val |= MACCFG1_TX_FLOW;
3058 if (flowctrl & FLOW_CTRL_RX)
3059 val |= MACCFG1_RX_FLOW;
3060 }
3061
3062 return val;
3063}
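
/* Example of the autoneg path above (a sketch): if the link partner
 * advertises symmetric pause (phydev->pause set) and we advertise it
 * locally, mii_resolve_flowctrl_fdx() resolves to FLOW_CTRL_TX |
 * FLOW_CTRL_RX and MACCFG1 gets both flow-control bits; a half-duplex
 * link always resolves to no flow control via the early return.
 */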
3064
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065/* Called every time the controller might need to be made
3066 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003067 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 * function converts those variables into the appropriate
3069 * register values, and can bring down the device if needed.
3070 */
3071static void adjust_link(struct net_device *dev)
3072{
3073 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003074 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003075 unsigned long flags;
3076 struct phy_device *phydev = priv->phydev;
3077 int new_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003079 local_irq_save(flags);
3080 lock_tx_qs(priv);
3081
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003082 if (phydev->link) {
Claudiu Manoil23402bd2013-08-12 13:53:26 +03003083 u32 tempval1 = gfar_read(&regs->maccfg1);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003084 u32 tempval = gfar_read(&regs->maccfg2);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003085 u32 ecntrl = gfar_read(&regs->ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003086
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087 /* Now we make sure that we can be in full duplex mode.
Jan Ceuleers0977f812012-06-05 03:42:12 +00003088 * If not, we operate in half-duplex mode.
3089 */
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003090 if (phydev->duplex != priv->oldduplex) {
3091 new_state = 1;
3092 if (!(phydev->duplex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 tempval &= ~(MACCFG2_FULL_DUPLEX);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003094 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 tempval |= MACCFG2_FULL_DUPLEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003097 priv->oldduplex = phydev->duplex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 }
3099
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003100 if (phydev->speed != priv->oldspeed) {
3101 new_state = 1;
3102 switch (phydev->speed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 case 1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104 tempval =
3105 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
Li Yangf430e492009-01-06 14:08:10 -08003106
3107 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 break;
3109 case 100:
3110 case 10:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 tempval =
3112 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003113
3114 /* Reduced mode distinguishes
Jan Ceuleers0977f812012-06-05 03:42:12 +00003115 * between 10 and 100
3116 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003117 if (phydev->speed == SPEED_100)
3118 ecntrl |= ECNTRL_R100;
3119 else
3120 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 break;
3122 default:
Joe Perches59deab22011-06-14 08:57:47 +00003123 netif_warn(priv, link, dev,
3124 "Ack! Speed (%d) is not 10/100/1000!\n",
3125 phydev->speed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 break;
3127 }
3128
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003129 priv->oldspeed = phydev->speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 }
3131
Claudiu Manoil23402bd2013-08-12 13:53:26 +03003132 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3133 tempval1 |= gfar_get_flowctrl_cfg(priv);
3134
3135 gfar_write(&regs->maccfg1, tempval1);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003136 gfar_write(&regs->maccfg2, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003137 gfar_write(&regs->ecntrl, ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003138
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139 if (!priv->oldlink) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003140 new_state = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141 priv->oldlink = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 }
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003143 } else if (priv->oldlink) {
3144 new_state = 1;
3145 priv->oldlink = 0;
3146 priv->oldspeed = 0;
3147 priv->oldduplex = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003150 if (new_state && netif_msg_link(priv))
3151 phy_print_status(phydev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003152 unlock_tx_qs(priv);
3153 local_irq_restore(flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003154}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155
3156/* Update the hash table based on the current list of multicast
3157 * addresses we subscribe to. Also, change the promiscuity of
3158 * the device based on the flags (this function is called
Jan Ceuleers0977f812012-06-05 03:42:12 +00003159 * whenever dev->flags is changed
3160 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161static void gfar_set_multi(struct net_device *dev)
3162{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003163 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003165 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 u32 tempval;
3167
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003168 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 /* Set RCTRL to PROM */
3170 tempval = gfar_read(&regs->rctrl);
3171 tempval |= RCTRL_PROM;
3172 gfar_write(&regs->rctrl, tempval);
3173 } else {
3174 /* Set RCTRL to not PROM */
3175 tempval = gfar_read(&regs->rctrl);
3176 tempval &= ~(RCTRL_PROM);
3177 gfar_write(&regs->rctrl, tempval);
3178 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003179
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003180 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003182 gfar_write(&regs->igaddr0, 0xffffffff);
3183 gfar_write(&regs->igaddr1, 0xffffffff);
3184 gfar_write(&regs->igaddr2, 0xffffffff);
3185 gfar_write(&regs->igaddr3, 0xffffffff);
3186 gfar_write(&regs->igaddr4, 0xffffffff);
3187 gfar_write(&regs->igaddr5, 0xffffffff);
3188 gfar_write(&regs->igaddr6, 0xffffffff);
3189 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 gfar_write(&regs->gaddr0, 0xffffffff);
3191 gfar_write(&regs->gaddr1, 0xffffffff);
3192 gfar_write(&regs->gaddr2, 0xffffffff);
3193 gfar_write(&regs->gaddr3, 0xffffffff);
3194 gfar_write(&regs->gaddr4, 0xffffffff);
3195 gfar_write(&regs->gaddr5, 0xffffffff);
3196 gfar_write(&regs->gaddr6, 0xffffffff);
3197 gfar_write(&regs->gaddr7, 0xffffffff);
3198 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003199 int em_num;
3200 int idx;
3201
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003203 gfar_write(&regs->igaddr0, 0x0);
3204 gfar_write(&regs->igaddr1, 0x0);
3205 gfar_write(&regs->igaddr2, 0x0);
3206 gfar_write(&regs->igaddr3, 0x0);
3207 gfar_write(&regs->igaddr4, 0x0);
3208 gfar_write(&regs->igaddr5, 0x0);
3209 gfar_write(&regs->igaddr6, 0x0);
3210 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 gfar_write(&regs->gaddr0, 0x0);
3212 gfar_write(&regs->gaddr1, 0x0);
3213 gfar_write(&regs->gaddr2, 0x0);
3214 gfar_write(&regs->gaddr3, 0x0);
3215 gfar_write(&regs->gaddr4, 0x0);
3216 gfar_write(&regs->gaddr5, 0x0);
3217 gfar_write(&regs->gaddr6, 0x0);
3218 gfar_write(&regs->gaddr7, 0x0);
3219
Andy Fleming7f7f5312005-11-11 12:38:59 -06003220 /* If we have extended hash tables, we need to
3221 * clear the exact match registers to prepare for
Jan Ceuleers0977f812012-06-05 03:42:12 +00003222 * setting them
3223 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003224 if (priv->extended_hash) {
3225 em_num = GFAR_EM_NUM + 1;
3226 gfar_clear_exact_match(dev);
3227 idx = 1;
3228 } else {
3229 idx = 0;
3230 em_num = 0;
3231 }
3232
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003233 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 return;
3235
3236 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003237 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003238 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003239 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003240 idx++;
3241 } else
Jiri Pirko22bedad32010-04-01 21:22:57 +00003242 gfar_set_hash_for_addr(dev, ha->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 }
3244 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245}
3246
Andy Fleming7f7f5312005-11-11 12:38:59 -06003247
3248/* Clears each of the exact match registers to zero, so they
Jan Ceuleers0977f812012-06-05 03:42:12 +00003249 * don't interfere with normal reception
3250 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003251static void gfar_clear_exact_match(struct net_device *dev)
3252{
3253 int idx;
Joe Perches6a3c910c2011-11-16 09:38:02 +00003254 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
Andy Fleming7f7f5312005-11-11 12:38:59 -06003255
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003256 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
Joe Perchesb6bc7652010-12-21 02:16:08 -08003257 gfar_set_mac_for_addr(dev, idx, zero_arr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003258}
3259
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260/* Set the appropriate hash bit for the given addr */
3261/* The algorithm works like so:
3262 * 1) Take the Destination Address (ie the multicast address), and
3263 * do a CRC on it (little endian), and reverse the bits of the
3264 * result.
3265 * 2) Use the 8 most significant bits as a hash into a 256-entry
3266 * table. The table is controlled through 8 32-bit registers:
3267 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 3268 * entry 255. This means that the 3 most significant bits of the
 3269 * hash index select which gaddr register to use, and the 5 other bits
3270 * indicate which bit (assuming an IBM numbering scheme, which
3271 * for PowerPC (tm) is usually the case) in the register holds
Jan Ceuleers0977f812012-06-05 03:42:12 +00003272 * the entry.
3273 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3275{
3276 u32 tempval;
3277 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c910c2011-11-16 09:38:02 +00003278 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003279 int width = priv->hash_width;
3280 u8 whichbit = (result >> (32 - width)) & 0x1f;
3281 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 u32 value = (1 << (31-whichbit));
3283
Kumar Gala0bbaf062005-06-20 10:54:21 -05003284 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003286 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287}
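
/* Worked example (a sketch, assuming hash_width == 8 for the 256-entry
 * group hash): if ether_crc() yields 0xA7000000, the top 3 bits give
 * whichreg = 0xA7 >> 5 = 5 (gaddr5) and the next 5 bits give
 * whichbit = 0xA7 & 0x1f = 7, so value = 1 << (31 - 7) sets bit 7 in
 * IBM numbering.
 */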
3288
Andy Fleming7f7f5312005-11-11 12:38:59 -06003289
3290/* There are multiple MAC Address register pairs on some controllers
3291 * This function sets the numth pair to a given address
3292 */
Joe Perchesb6bc7652010-12-21 02:16:08 -08003293static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3294 const u8 *addr)
Andy Fleming7f7f5312005-11-11 12:38:59 -06003295{
3296 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003297 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003298 int idx;
Joe Perches6a3c910c2011-11-16 09:38:02 +00003299 char tmpbuf[ETH_ALEN];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003300 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003301 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003302
3303 macptr += num*2;
3304
Jan Ceuleers0977f812012-06-05 03:42:12 +00003305 /* Now copy it into the mac registers backwards, cuz
3306 * little endian is silly
3307 */
Joe Perches6a3c910c2011-11-16 09:38:02 +00003308 for (idx = 0; idx < ETH_ALEN; idx++)
3309 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003310
3311 gfar_write(macptr, *((u32 *) (tmpbuf)));
3312
3313 tempval = *((u32 *) (tmpbuf + 4));
3314
3315 gfar_write(macptr+1, tempval);
3316}
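
/* Illustration (a sketch): for addr = 00:04:9f:01:02:03 the loop builds
 * tmpbuf = {0x03, 0x02, 0x01, 0x9f, 0x04, 0x00}; the first write loads
 * the four low reversed bytes into the selected register pair's first
 * half and the second loads the remaining two into the second half
 * (macstnaddr1/2 when num == 0), matching the MAC's little-endian layout.
 */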
3317
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003319static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003321 struct gfar_priv_grp *gfargrp = grp_id;
3322 struct gfar __iomem *regs = gfargrp->regs;
 3323	struct gfar_private *priv = gfargrp->priv;
3324 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325
3326 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003327 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328
3329 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003330 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05003331
3332 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003333 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05003334 (events & IEVENT_MAG))
3335 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336
3337 /* Hmm... */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003338 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003339 netdev_dbg(dev,
3340 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
Joe Perches59deab22011-06-14 08:57:47 +00003341 events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342
3343 /* Update the error counters */
3344 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003345 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
3347 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003348 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003350 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 if (events & IEVENT_XFUN) {
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003352 unsigned long flags;
3353
Joe Perches59deab22011-06-14 08:57:47 +00003354 netif_dbg(priv, tx_err, dev,
3355 "TX FIFO underrun, packet dropped\n");
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003356 dev->stats.tx_dropped++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003357 atomic64_inc(&priv->extra_stats.tx_underrun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003359 local_irq_save(flags);
3360 lock_tx_qs(priv);
3361
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 /* Reactivate the Tx Queues */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003363 gfar_write(&regs->tstat, gfargrp->tstat);
Anton Vorontsov836cf7f2009-11-10 14:11:08 +00003364
3365 unlock_tx_qs(priv);
3366 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367 }
Joe Perches59deab22011-06-14 08:57:47 +00003368 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 }
3370 if (events & IEVENT_BSY) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003371 dev->stats.rx_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003372 atomic64_inc(&priv->extra_stats.rx_bsy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003374 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375
Joe Perches59deab22011-06-14 08:57:47 +00003376 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3377 gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 }
3379 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003380 dev->stats.rx_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003381 atomic64_inc(&priv->extra_stats.rx_babr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
Joe Perches59deab22011-06-14 08:57:47 +00003383 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 }
3385 if (events & IEVENT_EBERR) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05003386 atomic64_inc(&priv->extra_stats.eberr);
Joe Perches59deab22011-06-14 08:57:47 +00003387 netif_dbg(priv, rx_err, dev, "bus error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 }
Joe Perches59deab22011-06-14 08:57:47 +00003389 if (events & IEVENT_RXC)
3390 netif_dbg(priv, rx_status, dev, "control frame\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391
3392 if (events & IEVENT_BABT) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05003393 atomic64_inc(&priv->extra_stats.tx_babt);
Joe Perches59deab22011-06-14 08:57:47 +00003394 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 }
3396 return IRQ_HANDLED;
3397}
3398
Andy Flemingb31a1d82008-12-16 15:29:15 -08003399static struct of_device_id gfar_match[] =
3400{
3401 {
3402 .type = "network",
3403 .compatible = "gianfar",
3404 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003405 {
3406 .compatible = "fsl,etsec2",
3407 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003408 {},
3409};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003410MODULE_DEVICE_TABLE(of, gfar_match);
Andy Flemingb31a1d82008-12-16 15:29:15 -08003411
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412/* Structure for a device driver */
Grant Likely74888762011-02-22 21:05:51 -07003413static struct platform_driver gfar_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003414 .driver = {
3415 .name = "fsl-gianfar",
3416 .owner = THIS_MODULE,
3417 .pm = GFAR_PM_OPS,
3418 .of_match_table = gfar_match,
3419 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 .probe = gfar_probe,
3421 .remove = gfar_remove,
3422};
3423
Axel Lindb62f682011-11-27 16:44:17 +00003424module_platform_driver(gfar_driver);