/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or a set amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
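
/* Illustrative sketch of the receive path described above (for
 * exposition only; the real implementation is gfar_clean_rx_ring(),
 * and the refill step is gfar_alloc_rx_buffs()):
 *
 *	while (work_done < budget) {
 *		bdp = &rx_queue->rx_bd_base[rx_queue->next_to_clean];
 *		if (be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_EMPTY))
 *			break;		// hardware still owns this BD
 *		// pass the pre-allocated skb up the stack, then attach
 *		// a fresh buffer and hand the BD back to hardware
 *		if (++rx_queue->next_to_clean == rx_queue->rx_ring_size)
 *			rx_queue->next_to_clean = 0;	// wrap around
 *		work_done++;
 *	}
 */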

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
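
/* Note on the ordering above: setting RXBD_EMPTY in lstatus hands
 * ownership of the descriptor to the controller, so the buffer
 * address must already be visible by then.  gfar_wmb() is the write
 * barrier enforcing that publish order:
 *
 *	1. fill in the payload fields (bufPtr)
 *	2. gfar_wmb()
 *	3. write lstatus, transferring the BD to hardware
 */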

static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}
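
/* Keeping one descriptor unused (see the comment above) makes the
 * ring states unambiguous: next_to_clean == next_to_use always means
 * "nothing to clean", never "completely full".  A sketch of the
 * unused-BD count under that invariant (what gfar_rxbd_unused() is
 * expected to compute; see gianfar.h for the authoritative version):
 *
 *	if (rxq->next_to_clean > rxq->next_to_use)
 *		return rxq->next_to_clean - rxq->next_to_use - 1;
 *	return rxq->rx_ring_size + rxq->next_to_clean
 *	       - rxq->next_to_use - 1;
 */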

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
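
/* Layout of the single coherent region allocated above -- all TX
 * rings first, all RX rings immediately after:
 *
 *	addr/vaddr -> | txbd8[tx_ring_size] x num_tx_queues |
 *	              | rxbd8[rx_ring_size] x num_rx_queues |
 *
 * Each queue records both the CPU pointer (tx_bd_base/rx_bd_base)
 * and the bus address (tx_bd_dma_base/rx_bd_dma_base) of its slice,
 * since the DMA engine is programmed with physical addresses only.
 */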

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
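
/* baddr is a u32 __iomem pointer, so "baddr += 2" above advances by
 * 8 bytes: in this register layout each per-queue base register
 * (TBASEn/RBASEn) occupies a 64-bit-aligned slot, with one reserved
 * word between consecutive entries.
 */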

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
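
/* The txic/rxic values written above pack an enable bit, a frame
 * count threshold and a timer threshold into a single register (see
 * the IC_* and mk_ic_value() definitions in gianfar.h).  Hypothetical
 * usage, assuming mk_ic_value(count, time):
 *
 *	priv->tx_queue[0]->txcoalescing = 1;
 *	priv->tx_queue[0]->txic = mk_ic_value(8, 32);
 *	gfar_configure_coalescing_all(priv);
 *
 * i.e. raise the TX interrupt after 8 frames, or earlier when the
 * coalescing timer expires.
 */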

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
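
/* Worked example of the bit reversal above: a bit map of 0x80 (MSB
 * set, meaning "queue 0 belongs to this group") becomes
 * bitrev8(0x80) == 0x01, so for_each_set_bit() -- which scans from
 * bit 0 upward -- yields i == 0, the intended queue index.
 */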

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id. The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}
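
/* An illustrative device-tree fragment matching the properties
 * parsed above (node name, unit address and all values are
 * hypothetical; real nodes live in the board .dts files):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		fsl,magic-packet;
 *		bd-stash;
 *		rx-stash-len = <96>;
 *		rx-stash-idx = <0>;
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *		queue-group {
 *			...
 *		};
 *	};
 */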

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
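
/* Sketch of the matching userspace request (standard SIOCSHWTSTAMP
 * usage from <linux/net_tstamp.h>; interface name and socket fd are
 * hypothetical):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter reports the filter actually applied; this
 * driver upgrades any non-NONE request to HWTSTAMP_FILTER_ALL.
 */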

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
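
/* Each call above consumes four consecutive filer entries, written
 * from the high index downward: a cluster-entry marker (RQFCR_CLE),
 * a NOMATCH default, and two AND-chained compares against the
 * "class" parse bits.  gfar_init_filer_table() below builds one such
 * cluster per protocol class (IPv4/IPv6, plain or with TCP/UDP).
 */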

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}
1213
1214static void gfar_hw_init(struct gfar_private *priv)
1215{
1216 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1217 u32 attrs;
1218
1219 /* Stop the DMA engine now, in case it was running before
1220 * (The firmware could have used it, and left it running).
1221 */
1222 gfar_halt(priv);
1223
1224 gfar_mac_reset(priv);
1225
 1226 /* Zero out the RMON MIB registers if the device has them */
1227 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1228 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1229
1230 /* Mask off the CAM interrupts */
1231 gfar_write(&regs->rmon.cam1, 0xffffffff);
1232 gfar_write(&regs->rmon.cam2, 0xffffffff);
1233 }
1234
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001236 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237
Claudiu Manoil34018fd2014-02-17 12:53:15 +02001238 /* Set the extraction length and index */
1239 attrs = ATTRELI_EL(priv->rx_stash_size) |
1240 ATTRELI_EI(priv->rx_stash_index);
1241
1242 gfar_write(&regs->attreli, attrs);
1243
1244 /* Start with defaults, and add stashing
1245 * depending on driver parameters
1246 */
1247 attrs = ATTR_INIT_SETTINGS;
1248
1249 if (priv->bd_stash_en)
1250 attrs |= ATTR_BDSTASH;
1251
1252 if (priv->rx_stash_size != 0)
1253 attrs |= ATTR_BUFSTASH;
1254
1255 gfar_write(&regs->attr, attrs);
1256
1257 /* FIFO configs */
1258 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1259 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1260 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1261
Claudiu Manoil20862782014-02-17 12:53:14 +02001262 /* Program the interrupt steering regs, only for MG devices */
1263 if (priv->num_grps > 1)
1264 gfar_write_isrg(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001265}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266
Xiubo Li898157e2014-06-04 16:49:16 +08001267static void gfar_init_addr_hash_table(struct gfar_private *priv)
Claudiu Manoil20862782014-02-17 12:53:14 +02001268{
1269 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001270
Andy Flemingb31a1d82008-12-16 15:29:15 -08001271 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001272 priv->extended_hash = 1;
1273 priv->hash_width = 9;
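		/* 9 hash bits index one of 2^9 = 512 filter bits, spread
		 * across the 16 igaddr/gaddr registers mapped below
		 */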
1274
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001275 priv->hash_regs[0] = &regs->igaddr0;
1276 priv->hash_regs[1] = &regs->igaddr1;
1277 priv->hash_regs[2] = &regs->igaddr2;
1278 priv->hash_regs[3] = &regs->igaddr3;
1279 priv->hash_regs[4] = &regs->igaddr4;
1280 priv->hash_regs[5] = &regs->igaddr5;
1281 priv->hash_regs[6] = &regs->igaddr6;
1282 priv->hash_regs[7] = &regs->igaddr7;
1283 priv->hash_regs[8] = &regs->gaddr0;
1284 priv->hash_regs[9] = &regs->gaddr1;
1285 priv->hash_regs[10] = &regs->gaddr2;
1286 priv->hash_regs[11] = &regs->gaddr3;
1287 priv->hash_regs[12] = &regs->gaddr4;
1288 priv->hash_regs[13] = &regs->gaddr5;
1289 priv->hash_regs[14] = &regs->gaddr6;
1290 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001291
1292 } else {
1293 priv->extended_hash = 0;
1294 priv->hash_width = 8;
1295
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001296 priv->hash_regs[0] = &regs->gaddr0;
1297 priv->hash_regs[1] = &regs->gaddr1;
1298 priv->hash_regs[2] = &regs->gaddr2;
1299 priv->hash_regs[3] = &regs->gaddr3;
1300 priv->hash_regs[4] = &regs->gaddr4;
1301 priv->hash_regs[5] = &regs->gaddr5;
1302 priv->hash_regs[6] = &regs->gaddr6;
1303 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001304 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001305}
1306
1307/* Set up the ethernet device structure, private data,
1308 * and anything else we need before we start
1309 */
1310static int gfar_probe(struct platform_device *ofdev)
1311{
1312 struct net_device *dev = NULL;
1313 struct gfar_private *priv = NULL;
1314 int err = 0, i;
1315
1316 err = gfar_of_init(ofdev, &dev);
1317
1318 if (err)
1319 return err;
1320
1321 priv = netdev_priv(dev);
1322 priv->ndev = dev;
1323 priv->ofdev = ofdev;
1324 priv->dev = &ofdev->dev;
1325 SET_NETDEV_DEV(dev, &ofdev->dev);
1326
Claudiu Manoil20862782014-02-17 12:53:14 +02001327 INIT_WORK(&priv->reset_task, gfar_reset_task);
1328
1329 platform_set_drvdata(ofdev, priv);
1330
1331 gfar_detect_errata(priv);
1332
Claudiu Manoil20862782014-02-17 12:53:14 +02001333 /* Set the dev->base_addr to the gfar reg region */
1334 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1335
1336 /* Fill in the dev structure */
1337 dev->watchdog_timeo = TX_TIMEOUT;
1338 dev->mtu = 1500;
1339 dev->netdev_ops = &gfar_netdev_ops;
1340 dev->ethtool_ops = &gfar_ethtool_ops;
1341
 1342 /* Register NAPI: one Rx and one Tx context per interrupt group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001343 for (i = 0; i < priv->num_grps; i++) {
1344 if (priv->poll_mode == GFAR_SQ_POLLING) {
1345 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1346 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001347 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001348 gfar_poll_tx_sq, 2);
1349 } else {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001350 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1351 gfar_poll_rx, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001352 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001353 gfar_poll_tx, 2);
1354 }
1355 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001356
1357 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1358 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1359 NETIF_F_RXCSUM;
1360 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1361 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1362 }
1363
1364 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1365 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1366 NETIF_F_HW_VLAN_CTAG_RX;
1367 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1368 }
1369
Claudiu Manoil3d23a052015-05-06 18:07:30 +03001370 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1371
Claudiu Manoil20862782014-02-17 12:53:14 +02001372 gfar_init_addr_hash_table(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001373
Claudiu Manoil532c37b2014-02-17 12:53:16 +02001374 /* Insert receive time stamps into padding alignment bytes */
1375 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1376 priv->padding = 8;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001377
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001378 if (dev->features & NETIF_F_IP_CSUM ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001379 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001380 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001382 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001383 for (i = 0; i < priv->num_tx_queues; i++) {
1384 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1385 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1386 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1387 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1388 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001389
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001390 for (i = 0; i < priv->num_rx_queues; i++) {
1391 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1392 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1393 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1394 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Jan Ceuleers0977f812012-06-05 03:42:12 +00001396 /* always enable rx filer */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001397 priv->rx_filer_enable = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001398 /* Enable most messages by default */
 1399 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +00001400 /* use priority h/w tx queue scheduling for single queue devices */
1401 if (priv->num_tx_queues == 1)
1402 priv->prio_sched_en = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001403
Claudiu Manoil08511332014-02-24 12:13:45 +02001404 set_bit(GFAR_DOWN, &priv->state);
1405
Claudiu Manoila328ac92014-02-24 12:13:42 +02001406 gfar_hw_init(priv);
Trent Piephod3eab822008-10-02 11:12:24 +00001407
Fabio Estevamd4c642e2014-06-03 19:55:38 -03001408 /* Carrier starts down, phylib will bring it up */
1409 netif_carrier_off(dev);
1410
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 err = register_netdev(dev);
1412
1413 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001414 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 goto register_fail;
1416 }
1417
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001418 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1419 priv->wol_supported |= GFAR_WOL_MAGIC;
1420
1421 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1422 priv->rx_filer_enable)
1423 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1424
1425 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001426
Dai Harukic50a5d92008-12-17 16:51:32 -08001427 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001428 for (i = 0; i < priv->num_grps; i++) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001429 struct gfar_priv_grp *grp = &priv->gfargrp[i];
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001430 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001431 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001432 dev->name, "_g", '0' + i, "_tx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001433 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001434 dev->name, "_g", '0' + i, "_rx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001435 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001436 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001437 } else
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001438 strcpy(gfar_irq(grp, TX)->name, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001439 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001440
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001441 /* Initialize the filer table */
1442 gfar_init_filer_table(priv);
1443
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001445 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
Jan Ceuleers0977f812012-06-05 03:42:12 +00001447 /* Even more device info helps when determining which kernel
1448 * provided which set of benchmarks.
1449 */
Joe Perches59deab22011-06-14 08:57:47 +00001450 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001451 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001452 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1453 i, priv->rx_queue[i]->rx_ring_size);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001454 for (i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001455 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1456 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
1458 return 0;
1459
1460register_fail:
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001461 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001462 gfar_free_rx_queues(priv);
1463 gfar_free_tx_queues(priv);
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001464 of_node_put(priv->phy_node);
1465 of_node_put(priv->tbi_node);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001466 free_gfar_dev(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001467 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468}
1469
Grant Likely2dc11582010-08-06 09:25:50 -06001470static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001472 struct gfar_private *priv = platform_get_drvdata(ofdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001474 of_node_put(priv->phy_node);
1475 of_node_put(priv->tbi_node);
Grant Likelyfe192a42009-04-25 12:53:12 +00001476
David S. Millerd9d8e042009-09-06 01:41:02 -07001477 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001478 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001479 gfar_free_rx_queues(priv);
1480 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001481 free_gfar_dev(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
1483 return 0;
1484}
1485
Scott Woodd87eb122008-07-11 18:04:45 -05001486#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001487
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001488static void __gfar_filer_disable(struct gfar_private *priv)
1489{
1490 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1491 u32 temp;
1492
1493 temp = gfar_read(&regs->rctrl);
1494 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1495 gfar_write(&regs->rctrl, temp);
1496}
1497
1498static void __gfar_filer_enable(struct gfar_private *priv)
1499{
1500 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1501 u32 temp;
1502
1503 temp = gfar_read(&regs->rctrl);
1504 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1505 gfar_write(&regs->rctrl, temp);
1506}
1507
 1508/* Filer rules implementing WoL capabilities */
1509static void gfar_filer_config_wol(struct gfar_private *priv)
1510{
1511 unsigned int i;
1512 u32 rqfcr;
1513
1514 __gfar_filer_disable(priv);
1515
1516 /* clear the filer table, reject any packet by default */
1517 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1518 for (i = 0; i <= MAX_FILER_IDX; i++)
1519 gfar_write_filer(priv, i, rqfcr, 0);
1520
1521 i = 0;
1522 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1523 /* unicast packet, accept it */
1524 struct net_device *ndev = priv->ndev;
1525 /* get the default rx queue index */
1526 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1527 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1528 (ndev->dev_addr[1] << 8) |
1529 ndev->dev_addr[2];
1530
1531 rqfcr = (qindex << 10) | RQFCR_AND |
1532 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1533
1534 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1535
1536 dest_mac_addr = (ndev->dev_addr[3] << 16) |
1537 (ndev->dev_addr[4] << 8) |
1538 ndev->dev_addr[5];
1539 rqfcr = (qindex << 10) | RQFCR_GPI |
1540 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1541 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1542 }
1543
1544 __gfar_filer_enable(priv);
1545}
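/* A note on the rule pair above (inferred from the RQFCR usage in this
 * file, not from the reference manual): the DAH entry carries RQFCR_AND,
 * chaining it with the DAL entry that follows, so a frame is accepted
 * only when both the upper and the lower 24 bits of the station address
 * match exactly. The final entry carries RQFCR_GPI, raising the Filer
 * General Purpose Interrupt (IMASK_FGPI) that gfar_start_wol_filer()
 * unmasks as the wake event.
 */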
1546
1547static void gfar_filer_restore_table(struct gfar_private *priv)
1548{
1549 u32 rqfcr, rqfpr;
1550 unsigned int i;
1551
1552 __gfar_filer_disable(priv);
1553
1554 for (i = 0; i <= MAX_FILER_IDX; i++) {
1555 rqfcr = priv->ftp_rqfcr[i];
1556 rqfpr = priv->ftp_rqfpr[i];
1557 gfar_write_filer(priv, i, rqfcr, rqfpr);
1558 }
1559
1560 __gfar_filer_enable(priv);
1561}
1562
1563/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1564static void gfar_start_wol_filer(struct gfar_private *priv)
1565{
1566 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1567 u32 tempval;
1568 int i = 0;
1569
1570 /* Enable Rx hw queues */
1571 gfar_write(&regs->rqueue, priv->rqueue);
1572
1573 /* Initialize DMACTRL to have WWR and WOP */
1574 tempval = gfar_read(&regs->dmactrl);
1575 tempval |= DMACTRL_INIT_SETTINGS;
1576 gfar_write(&regs->dmactrl, tempval);
1577
1578 /* Make sure we aren't stopped */
1579 tempval = gfar_read(&regs->dmactrl);
1580 tempval &= ~DMACTRL_GRS;
1581 gfar_write(&regs->dmactrl, tempval);
1582
1583 for (i = 0; i < priv->num_grps; i++) {
1584 regs = priv->gfargrp[i].regs;
1585 /* Clear RHLT, so that the DMA starts polling now */
1586 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1587 /* enable the Filer General Purpose Interrupt */
1588 gfar_write(&regs->imask, IMASK_FGPI);
1589 }
1590
1591 /* Enable Rx DMA */
1592 tempval = gfar_read(&regs->maccfg1);
1593 tempval |= MACCFG1_RX_EN;
1594 gfar_write(&regs->maccfg1, tempval);
1595}
1596
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001597static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001598{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001599 struct gfar_private *priv = dev_get_drvdata(dev);
1600 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001601 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001602 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001603 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001604
Claudiu Manoil614b4242015-07-31 18:38:32 +03001605 if (!netif_running(ndev))
1606 return 0;
1607
1608 disable_napi(priv);
1609 netif_tx_lock(ndev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001610 netif_device_detach(ndev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001611 netif_tx_unlock(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001612
Claudiu Manoil614b4242015-07-31 18:38:32 +03001613 gfar_halt(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001614
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001615 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001616 /* Enable interrupt on Magic Packet */
1617 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001618
Claudiu Manoil614b4242015-07-31 18:38:32 +03001619 /* Enable Magic Packet mode */
1620 tempval = gfar_read(&regs->maccfg2);
1621 tempval |= MACCFG2_MPEN;
1622 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001623
Claudiu Manoil614b4242015-07-31 18:38:32 +03001624 /* re-enable the Rx block */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001625 tempval = gfar_read(&regs->maccfg1);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001626 tempval |= MACCFG1_RX_EN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001627 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001628
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001629 } else if (wol & GFAR_WOL_FILER_UCAST) {
1630 gfar_filer_config_wol(priv);
1631 gfar_start_wol_filer(priv);
1632
Claudiu Manoil614b4242015-07-31 18:38:32 +03001633 } else {
1634 phy_stop(priv->phydev);
Scott Woodd87eb122008-07-11 18:04:45 -05001635 }
1636
1637 return 0;
1638}
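/* Summary of the wake paths above (restating the code): magic-packet
 * wake leaves the Rx block running with IMASK_MAG as the only unmasked
 * interrupt; filer wake swaps in the minimal accept-own-unicast rule set
 * and restarts Rx with IMASK_FGPI; with no wake source configured the
 * PHY is simply stopped.
 */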
1639
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001640static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001641{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001642 struct gfar_private *priv = dev_get_drvdata(dev);
1643 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001644 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001645 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001646 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001647
Claudiu Manoil614b4242015-07-31 18:38:32 +03001648 if (!netif_running(ndev))
Scott Woodd87eb122008-07-11 18:04:45 -05001649 return 0;
Scott Woodd87eb122008-07-11 18:04:45 -05001650
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001651 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001652 /* Disable Magic Packet mode */
1653 tempval = gfar_read(&regs->maccfg2);
1654 tempval &= ~MACCFG2_MPEN;
1655 gfar_write(&regs->maccfg2, tempval);
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001656
1657 } else if (wol & GFAR_WOL_FILER_UCAST) {
1658 /* need to stop rx only, tx is already down */
1659 gfar_halt(priv);
1660 gfar_filer_restore_table(priv);
1661
Claudiu Manoil614b4242015-07-31 18:38:32 +03001662 } else {
Scott Woodd87eb122008-07-11 18:04:45 -05001663 phy_start(priv->phydev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001664 }
Scott Woodd87eb122008-07-11 18:04:45 -05001665
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001666 gfar_start(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001667
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001668 netif_device_attach(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001669 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001670
1671 return 0;
1672}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001673
1674static int gfar_restore(struct device *dev)
1675{
1676 struct gfar_private *priv = dev_get_drvdata(dev);
1677 struct net_device *ndev = priv->ndev;
1678
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001679 if (!netif_running(ndev)) {
1680 netif_device_attach(ndev);
1681
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001682 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001683 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001684
Claudiu Manoil76f31e82015-07-13 16:22:03 +03001685 gfar_init_bds(ndev);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001686
Claudiu Manoila328ac92014-02-24 12:13:42 +02001687 gfar_mac_reset(priv);
1688
1689 gfar_init_tx_rx_base(priv);
1690
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001691 gfar_start(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001692
1693 priv->oldlink = 0;
1694 priv->oldspeed = 0;
1695 priv->oldduplex = -1;
1696
1697 if (priv->phydev)
1698 phy_start(priv->phydev);
1699
1700 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001701 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001702
1703 return 0;
1704}
1705
1706static struct dev_pm_ops gfar_pm_ops = {
1707 .suspend = gfar_suspend,
1708 .resume = gfar_resume,
1709 .freeze = gfar_suspend,
1710 .thaw = gfar_resume,
1711 .restore = gfar_restore,
1712};
1713
1714#define GFAR_PM_OPS (&gfar_pm_ops)
1715
Scott Woodd87eb122008-07-11 18:04:45 -05001716#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001717
1718#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001719
Scott Woodd87eb122008-07-11 18:04:45 -05001720#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001722/* Reads the controller's registers to determine what interface
1723 * connects it to the PHY.
1724 */
1725static phy_interface_t gfar_get_interface(struct net_device *dev)
1726{
1727 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001728 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001729 u32 ecntrl;
1730
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001731 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001732
1733 if (ecntrl & ECNTRL_SGMII_MODE)
1734 return PHY_INTERFACE_MODE_SGMII;
1735
1736 if (ecntrl & ECNTRL_TBI_MODE) {
1737 if (ecntrl & ECNTRL_REDUCED_MODE)
1738 return PHY_INTERFACE_MODE_RTBI;
1739 else
1740 return PHY_INTERFACE_MODE_TBI;
1741 }
1742
1743 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001744 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001745 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001746 }
Andy Fleming7132ab72007-07-11 11:43:07 -05001747 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001748 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001749
Jan Ceuleers0977f812012-06-05 03:42:12 +00001750 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001751 * be set by the device tree or platform code.
1752 */
1753 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1754 return PHY_INTERFACE_MODE_RGMII_ID;
1755
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001756 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001757 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001758 }
1759
Andy Flemingb31a1d82008-12-16 15:29:15 -08001760 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001761 return PHY_INTERFACE_MODE_GMII;
1762
1763 return PHY_INTERFACE_MODE_MII;
1764}
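/* Summary of the ECNTRL decode above (restating the code): SGMII_MODE
 * selects SGMII; TBI_MODE selects RTBI when REDUCED_MODE is also set,
 * else TBI; REDUCED_MODE alone selects RMII when REDUCED_MII_MODE is
 * set, else RGMII (or RGMII_ID if the platform says so); otherwise
 * gigabit-capable devices report GMII and the rest report MII.
 */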
1765
1766
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001767/* Initializes driver's PHY state, and attaches to the PHY.
1768 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 */
1770static int init_phy(struct net_device *dev)
1771{
1772 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001773 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001774 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001775 GFAR_SUPPORTED_GBIT : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001776 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777
1778 priv->oldlink = 0;
1779 priv->oldspeed = 0;
1780 priv->oldduplex = -1;
1781
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001782 interface = gfar_get_interface(dev);
1783
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001784 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1785 interface);
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001786 if (!priv->phydev) {
1787 dev_err(&dev->dev, "could not attach to PHY\n");
1788 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001789 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Kapil Junejad3c12872007-05-11 18:25:11 -05001791 if (interface == PHY_INTERFACE_MODE_SGMII)
1792 gfar_configure_serdes(dev);
1793
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001794 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001795 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1796 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001798 /* Add support for flow control, but don't advertise it by default */
1799 priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
1803
Jan Ceuleers0977f812012-06-05 03:42:12 +00001804/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001805 * SERDES lynx PHY on the chip. We communicate with this PHY
1806 * through the MDIO bus on each controller, treating it as a
1807 * "normal" PHY at the address found in the TBIPA register. We assume
1808 * that the TBIPA register is valid. Either the MDIO bus code will set
1809 * it to a value that doesn't conflict with other PHYs on the bus, or the
1810 * value doesn't matter, as there are no other PHYs on the bus.
1811 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001812static void gfar_configure_serdes(struct net_device *dev)
1813{
1814 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001815 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001816
Grant Likelyfe192a42009-04-25 12:53:12 +00001817 if (!priv->tbi_node) {
1818 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1819 "device tree specify a tbi-handle\n");
1820 return;
1821 }
1822
1823 tbiphy = of_phy_find_device(priv->tbi_node);
1824 if (!tbiphy) {
1825 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001826 return;
1827 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001828
Jan Ceuleers0977f812012-06-05 03:42:12 +00001829 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001830 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1831 * everything for us? Resetting it takes the link down and requires
1832 * several seconds for it to come back.
1833 */
Russell King38737e42015-09-24 20:36:28 +01001834 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1835 put_device(&tbiphy->dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -08001836 return;
Russell King38737e42015-09-24 20:36:28 +01001837 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001838
Paul Gortmakerd0313582008-04-17 00:08:10 -04001839 /* Single clk mode, mii mode off(for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001840 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001841
Grant Likelyfe192a42009-04-25 12:53:12 +00001842 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001843 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1844 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001845
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001846 phy_write(tbiphy, MII_BMCR,
1847 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1848 BMCR_SPEED1000);
Russell King04d53b22015-09-24 20:36:18 +01001849
1850 put_device(&tbiphy->dev);
Kapil Junejad3c12872007-05-11 18:25:11 -05001851}
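/* (The three writes above amount to a standard 1000BASE-X bring-up for
 * the on-chip TBI PHY: select the TBI clock and disable MII mode,
 * advertise full duplex plus symmetric/asymmetric pause, then enable
 * and restart autonegotiation at 1000 Mb/s full duplex.)
 */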
1852
Anton Vorontsov511d9342010-06-30 06:39:15 +00001853static int __gfar_is_rx_idle(struct gfar_private *priv)
1854{
1855 u32 res;
1856
Jan Ceuleers0977f812012-06-05 03:42:12 +00001857 /* Normally the TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001858 * actually wait for the IEVENT_GRSC flag.
1859 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001860 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001861 return 0;
1862
Jan Ceuleers0977f812012-06-05 03:42:12 +00001863 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001864 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1865 * and the Rx can be safely reset.
1866 */
1867 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1868 res &= 0x7f807f80;
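	/* the mask keeps the same 8-bit field (bits 7-14) in each 16-bit
	 * half, so equal halves mean bits 7-14 match bits 23-30
	 */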
1869 if ((res & 0xffff) == (res >> 16))
1870 return 1;
1871
1872 return 0;
1873}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001874
 1875/* Gracefully stop Rx/Tx DMA without clearing the MAC Rx/Tx enables */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001876static void gfar_halt_nodisable(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877{
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001878 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 u32 tempval;
Claudiu Manoila4feee82014-10-07 10:44:34 +03001880 unsigned int timeout;
1881 int stopped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001883 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
Claudiu Manoila4feee82014-10-07 10:44:34 +03001885 if (gfar_is_dma_stopped(priv))
1886 return;
1887
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001889 tempval = gfar_read(&regs->dmactrl);
Claudiu Manoila4feee82014-10-07 10:44:34 +03001890 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1891 gfar_write(&regs->dmactrl, tempval);
Anton Vorontsov511d9342010-06-30 06:39:15 +00001892
Claudiu Manoila4feee82014-10-07 10:44:34 +03001893retry:
1894 timeout = 1000;
1895 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1896 cpu_relax();
1897 timeout--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 }
Claudiu Manoila4feee82014-10-07 10:44:34 +03001899
1900 if (!timeout)
1901 stopped = gfar_is_dma_stopped(priv);
1902
1903 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1904 !__gfar_is_rx_idle(priv))
1905 goto retry;
Scott Woodd87eb122008-07-11 18:04:45 -05001906}
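/* (The retry above exists because, per __gfar_is_rx_idle(), eTSEC-A002
 * parts may never signal graceful-stop completion for Rx; the idle
 * heuristic is then the only safe way to decide the DMA has quiesced.)
 */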
Scott Woodd87eb122008-07-11 18:04:45 -05001907
1908/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001909void gfar_halt(struct gfar_private *priv)
Scott Woodd87eb122008-07-11 18:04:45 -05001910{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001911 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001912 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001914 /* Disable the Rx/Tx hw queues */
1915 gfar_write(&regs->rqueue, 0);
1916 gfar_write(&regs->tqueue, 0);
Scott Wood2a54adc2008-08-12 15:10:46 -05001917
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001918 mdelay(10);
1919
1920 gfar_halt_nodisable(priv);
1921
1922 /* Disable Rx/Tx DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 tempval = gfar_read(&regs->maccfg1);
1924 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1925 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001926}
1927
1928void stop_gfar(struct net_device *dev)
1929{
1930 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001931
Claudiu Manoil08511332014-02-24 12:13:45 +02001932 netif_tx_stop_all_queues(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001933
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001934 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02001935 set_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001936 smp_mb__after_atomic();
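	/* the paired barriers order the GFAR_DOWN store against the
	 * surrounding stop sequence on weakly ordered cores
	 */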
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001937
Claudiu Manoil08511332014-02-24 12:13:45 +02001938 disable_napi(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001939
Claudiu Manoil08511332014-02-24 12:13:45 +02001940 /* disable ints and gracefully shut down Rx/Tx DMA */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001941 gfar_halt(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Claudiu Manoil08511332014-02-24 12:13:45 +02001943 phy_stop(priv->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946}
1947
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001948static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001951 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001952 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001954 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001956 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1957 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001958 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
Claudiu Manoila7312d52015-03-13 10:36:28 +02001960 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1961 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001962 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001963 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001964 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001965 txbdp++;
Claudiu Manoila7312d52015-03-13 10:36:28 +02001966 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1967 be16_to_cpu(txbdp->length),
1968 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001970 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001971 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1972 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001974 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001975 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001976}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001978static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1979{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001980 int i;
1981
Claudiu Manoil75354142015-07-13 16:22:06 +03001982 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1983
1984 if (rx_queue->skb)
1985 dev_kfree_skb(rx_queue->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001987 for (i = 0; i < rx_queue->rx_ring_size; i++) {
Claudiu Manoil75354142015-07-13 16:22:06 +03001988 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1989
Anton Vorontsove69edd22009-10-12 06:00:30 +00001990 rxbdp->lstatus = 0;
1991 rxbdp->bufPtr = 0;
1992 rxbdp++;
Claudiu Manoil75354142015-07-13 16:22:06 +03001993
1994 if (!rxb->page)
1995 continue;
1996
1997 dma_unmap_single(rx_queue->dev, rxb->dma,
1998 PAGE_SIZE, DMA_FROM_DEVICE);
1999 __free_page(rxb->page);
2000
2001 rxb->page = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002003
2004 kfree(rx_queue->rx_buff);
2005 rx_queue->rx_buff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002006}
Anton Vorontsove69edd22009-10-12 06:00:30 +00002007
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002008/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00002009 * Then free tx_skbuff and rx_skbuff
2010 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002011static void free_skb_resources(struct gfar_private *priv)
2012{
2013 struct gfar_priv_tx_q *tx_queue = NULL;
2014 struct gfar_priv_rx_q *rx_queue = NULL;
2015 int i;
2016
2017 /* Go through all the buffer descriptors and free their data buffers */
2018 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002019 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002020
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002021 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002022 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002023 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002024 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002025 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002026 }
2027
2028 for (i = 0; i < priv->num_rx_queues; i++) {
2029 rx_queue = priv->rx_queue[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002030 if (rx_queue->rx_buff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002031 free_skb_rx_queue(rx_queue);
2032 }
2033
Claudiu Manoil369ec162013-02-14 05:00:02 +00002034 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002035 sizeof(struct txbd8) * priv->total_tx_ring_size +
2036 sizeof(struct rxbd8) * priv->total_rx_ring_size,
2037 priv->tx_queue[0]->tx_bd_base,
2038 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039}
2040
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002041void gfar_start(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002042{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002043 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002044 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002045 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002046
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002047 /* Enable Rx/Tx hw queues */
2048 gfar_write(&regs->rqueue, priv->rqueue);
2049 gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002050
2051 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002052 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002053 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002054 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002055
Kumar Gala0bbaf062005-06-20 10:54:21 -05002056 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002057 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002058 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002059 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002060
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002061 for (i = 0; i < priv->num_grps; i++) {
2062 regs = priv->gfargrp[i].regs;
2063 /* Clear THLT/RHLT, so that the DMA starts polling now */
2064 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2065 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002066 }
Dai Haruki12dea572008-12-16 15:30:20 -08002067
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002068 /* Enable Rx/Tx DMA */
2069 tempval = gfar_read(&regs->maccfg1);
2070 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2071 gfar_write(&regs->maccfg1, tempval);
2072
Claudiu Manoilefeddce2014-02-17 12:53:17 +02002073 gfar_ints_enable(priv);
2074
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002075 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002076}
2077
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002078static void free_grp_irqs(struct gfar_priv_grp *grp)
2079{
2080 free_irq(gfar_irq(grp, TX)->irq, grp);
2081 free_irq(gfar_irq(grp, RX)->irq, grp);
2082 free_irq(gfar_irq(grp, ER)->irq, grp);
2083}
2084
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002085static int register_grp_irqs(struct gfar_priv_grp *grp)
2086{
2087 struct gfar_private *priv = grp->priv;
2088 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00002089 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00002092 * them. Otherwise, only register for the one
2093 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002094 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002095 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00002096 * Transmit, and Receive
2097 */
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002098 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002099 gfar_irq(grp, ER)->name, grp);
2100 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002101 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002102 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002103
Julia Lawall2145f1a2010-08-05 10:26:20 +00002104 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002106 enable_irq_wake(gfar_irq(grp, ER)->irq);
2107
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002108 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2109 gfar_irq(grp, TX)->name, grp);
2110 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002111 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002112 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 goto tx_irq_fail;
2114 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002115 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2116 gfar_irq(grp, RX)->name, grp);
2117 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002118 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002119 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 goto rx_irq_fail;
2121 }
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002122 enable_irq_wake(gfar_irq(grp, RX)->irq);
2123
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 } else {
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002125 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002126 gfar_irq(grp, TX)->name, grp);
2127 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002128 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002129 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 goto err_irq_fail;
2131 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002132 enable_irq_wake(gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 }
2134
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002135 return 0;
2136
2137rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002138 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002139tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002140 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002141err_irq_fail:
2142 return err;
2143
2144}
2145
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002146static void gfar_free_irq(struct gfar_private *priv)
2147{
2148 int i;
2149
2150 /* Free the IRQs */
2151 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2152 for (i = 0; i < priv->num_grps; i++)
2153 free_grp_irqs(&priv->gfargrp[i]);
2154 } else {
2155 for (i = 0; i < priv->num_grps; i++)
2156 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2157 &priv->gfargrp[i]);
2158 }
2159}
2160
2161static int gfar_request_irq(struct gfar_private *priv)
2162{
2163 int err, i, j;
2164
2165 for (i = 0; i < priv->num_grps; i++) {
2166 err = register_grp_irqs(&priv->gfargrp[i]);
2167 if (err) {
2168 for (j = 0; j < i; j++)
2169 free_grp_irqs(&priv->gfargrp[j]);
2170 return err;
2171 }
2172 }
2173
2174 return 0;
2175}
2176
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002177/* Bring the controller up and running */
2178int startup_gfar(struct net_device *ndev)
2179{
2180 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002181 int err;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002182
Claudiu Manoila328ac92014-02-24 12:13:42 +02002183 gfar_mac_reset(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002184
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002185 err = gfar_alloc_skb_resources(ndev);
2186 if (err)
2187 return err;
2188
Claudiu Manoila328ac92014-02-24 12:13:42 +02002189 gfar_init_tx_rx_base(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002190
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002191 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002192 clear_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002193 smp_mb__after_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002194
2195 /* Start Rx/Tx DMA and enable the interrupts */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002196 gfar_start(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197
Claudiu Manoil2a4eebf2015-08-13 16:50:37 +03002198 /* force link state update after mac reset */
2199 priv->oldlink = 0;
2200 priv->oldspeed = 0;
2201 priv->oldduplex = -1;
2202
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00002203 phy_start(priv->phydev);
2204
Claudiu Manoil08511332014-02-24 12:13:45 +02002205 enable_napi(priv);
2206
2207 netif_tx_wake_all_queues(ndev);
2208
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210}
2211
Jan Ceuleers0977f812012-06-05 03:42:12 +00002212/* Called when something needs to use the ethernet device
2213 * Returns 0 for success.
2214 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215static int gfar_enet_open(struct net_device *dev)
2216{
Li Yang94e8cc32007-10-12 21:53:51 +08002217 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 int err;
2219
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 err = init_phy(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002221 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 return err;
2223
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002224 err = gfar_request_irq(priv);
2225 if (err)
2226 return err;
2227
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 err = startup_gfar(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002229 if (err)
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04002230 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
2232 return err;
2233}
2234
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002235static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002236{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002237 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002238
2239 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002240
Kumar Gala0bbaf062005-06-20 10:54:21 -05002241 return fcb;
2242}
2243
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002244static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002245 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002246{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002247 /* If we're here, it's an IP packet with a TCP or UDP
2248 * payload. We set it to checksum, using a pseudo-header
2249 * we provide
2250 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002251 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002252
Jan Ceuleers0977f812012-06-05 03:42:12 +00002253 /* Tell the controller what the protocol is,
 2254 * and provide the already calculated phcs
2255 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002256 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002257 flags |= TXFCB_UDP;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002258 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002259 } else
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002260 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002261
2262 /* l3os is the distance between the start of the
2263 * frame (skb->data) and the start of the IP hdr.
2264 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002265 * l3 hdr and the l4 hdr
2266 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002267 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002268 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002269
Andy Fleming7f7f5312005-11-11 12:38:59 -06002270 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002271}
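/* Offset layout (illustrative, for the non-timestamping case where
 * fcb_length == GMAC_FCB_LEN and skb->data points at the pushed FCB):
 *
 *   skb->data
 *   |<- fcb_length ->|<--- L2 hdr --->|<-- IP hdr -->|<- TCP/UDP ->|
 *                    |<---- l3os ---->|<--- l4os --->|
 */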
2272
Andy Fleming7f7f5312005-11-11 12:38:59 -06002273inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002274{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002275 fcb->flags |= TXFCB_VLN;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002276 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
Kumar Gala0bbaf062005-06-20 10:54:21 -05002277}
2278
Dai Haruki4669bc92008-12-17 16:51:04 -08002279static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002280 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002281{
2282 struct txbd8 *new_bd = bdp + stride;
2283
2284 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2285}
2286
2287static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002288 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002289{
2290 return skip_txbd(bdp, 1, base, ring_size);
2291}
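/* e.g. with a ring of 8 descriptors, skip_txbd(base + 6, 3, base, 8)
 * yields base + 1: the stride walks off the end and wraps around
 * (illustrative arithmetic, not driver code)
 */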
2292
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002293/* eTSEC12: csum generation not supported for some fcb offsets */
2294static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2295 unsigned long fcb_addr)
2296{
2297 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2298 (fcb_addr % 0x20) > 0x18);
2299}
2300
2301/* eTSEC76: csum generation for frames larger than 2500 may
2302 * cause excess delays before start of transmission
2303 */
2304static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2305 unsigned int len)
2306{
2307 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2308 (len > 2500));
2309}
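/* A sketch of how these predicates are typically consumed (illustrative;
 * the actual gating sits further down the xmit path): when either fires,
 * the driver is expected to fall back to software checksumming, e.g.
 *
 *	if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
 *	    unlikely(gfar_csum_errata_76(priv, skb->len)))
 *		skb_checksum_help(skb);
 */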
2310
Jan Ceuleers0977f812012-06-05 03:42:12 +00002311/* This is called by the kernel when a frame is ready for transmission.
2312 * It is pointed to by the dev->hard_start_xmit function pointer
2313 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2315{
2316 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002317 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002318 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002319 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002320 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002321 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002322 u32 lstatus;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002323 int i, rq = 0;
2324 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002325 u32 bufaddr;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002326 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002327
2328 rq = skb->queue_mapping;
2329 tx_queue = priv->tx_queue[rq];
2330 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002331 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002332 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002333
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002334 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002335 do_vlan = skb_vlan_tag_present(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002336 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2337 priv->hwts_tx_en;
2338
2339 if (do_csum || do_vlan)
2340 fcb_len = GMAC_FCB_LEN;
2341
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002342 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002343 if (unlikely(do_tstamp))
2344 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002345
Li Yang5b28bea2009-03-27 15:54:30 -07002346 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002347 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002348 struct sk_buff *skb_new;
2349
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002350 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002351 if (!skb_new) {
2352 dev->stats.tx_errors++;
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002353 dev_kfree_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002354 return NETDEV_TX_OK;
2355 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002356
Eric Dumazet313b0372012-07-05 11:45:13 +00002357 if (skb->sk)
2358 skb_set_owner_w(skb_new, skb->sk);
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002359 dev_consume_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002360 skb = skb_new;
2361 }
2362
Dai Haruki4669bc92008-12-17 16:51:04 -08002363 /* total number of fragments in the SKB */
2364 nr_frags = skb_shinfo(skb)->nr_frags;
2365
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002366 /* calculate the required number of TxBDs for this skb */
2367 if (unlikely(do_tstamp))
2368 nr_txbds = nr_frags + 2;
2369 else
2370 nr_txbds = nr_frags + 1;
2371
Dai Haruki4669bc92008-12-17 16:51:04 -08002372 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002373 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002374 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002375 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002376 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002377 return NETDEV_TX_BUSY;
2378 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379
2380 /* Update transmit stats */
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002381 bytes_sent = skb->len;
2382 tx_queue->stats.tx_bytes += bytes_sent;
2383 /* keep Tx bytes on wire for BQL accounting */
2384 GFAR_CB(skb)->bytes_sent = bytes_sent;
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002385 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002387 txbdp = txbdp_start = tx_queue->cur_tx;
Claudiu Manoila7312d52015-03-13 10:36:28 +02002388 lstatus = be32_to_cpu(txbdp->lstatus);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002389
2390 /* Time stamp insertion requires one additional TxBD */
2391 if (unlikely(do_tstamp))
2392 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002393 tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394
Dai Haruki4669bc92008-12-17 16:51:04 -08002395 if (nr_frags == 0) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002396 if (unlikely(do_tstamp)) {
2397 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2398
2399 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2400 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2401 } else {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002402 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002403 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002404 } else {
2405 /* Place the fragment addresses and lengths into the TxBDs */
2406 for (i = 0; i < nr_frags; i++) {
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002407 unsigned int frag_len;
Dai Haruki4669bc92008-12-17 16:51:04 -08002408 /* Point at the next BD, wrapping as needed */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002409 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002411 frag_len = skb_shinfo(skb)->frags[i].size;
Dai Haruki4669bc92008-12-17 16:51:04 -08002412
Claudiu Manoila7312d52015-03-13 10:36:28 +02002413 lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002414 BD_LFLAG(TXBD_READY);
Dai Haruki4669bc92008-12-17 16:51:04 -08002415
2416 /* Handle the last BD specially */
2417 if (i == nr_frags - 1)
2418 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2419
Claudiu Manoil369ec162013-02-14 05:00:02 +00002420 bufaddr = skb_frag_dma_map(priv->dev,
Ian Campbell2234a722011-08-29 23:18:29 +00002421 &skb_shinfo(skb)->frags[i],
2422 0,
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002423 frag_len,
Ian Campbell2234a722011-08-29 23:18:29 +00002424 DMA_TO_DEVICE);
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002425 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2426 goto dma_map_err;
Dai Haruki4669bc92008-12-17 16:51:04 -08002427
2428 /* set the TxBD length and buffer pointer */
Claudiu Manoila7312d52015-03-13 10:36:28 +02002429 txbdp->bufPtr = cpu_to_be32(bufaddr);
2430 txbdp->lstatus = cpu_to_be32(lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002431 }
2432
Claudiu Manoila7312d52015-03-13 10:36:28 +02002433 lstatus = be32_to_cpu(txbdp_start->lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002436 /* Add TxPAL between FCB and frame if required */
2437 if (unlikely(do_tstamp)) {
2438 skb_push(skb, GMAC_TXPAL_LEN);
2439 memset(skb->data, 0, GMAC_TXPAL_LEN);
2440 }
2441
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002442 /* Add TxFCB if required */
2443 if (fcb_len) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002444 fcb = gfar_add_fcb(skb);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002445 lstatus |= BD_LFLAG(TXBD_TOE);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002446 }
2447
2448 /* Set up checksumming */
2449 if (do_csum) {
2450 gfar_tx_checksum(skb, fcb, fcb_len);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002451
2452 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2453 unlikely(gfar_csum_errata_76(priv, skb->len))) {
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002454 __skb_pull(skb, GMAC_FCB_LEN);
2455 skb_checksum_help(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002456 if (do_vlan || do_tstamp) {
2457 /* put back a new fcb for vlan/tstamp TOE */
2458 fcb = gfar_add_fcb(skb);
2459 } else {
2460 /* Tx TOE not used */
2461 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2462 fcb = NULL;
2463 }
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002464 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002465 }
2466
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002467 if (do_vlan)
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002468 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002469
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002470 /* Setup tx hardware time stamping if requested */
2471 if (unlikely(do_tstamp)) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002472 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002473 fcb->ptp = 1;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002474 }
2475
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002476 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2477 DMA_TO_DEVICE);
2478 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2479 goto dma_map_err;
2480
Claudiu Manoila7312d52015-03-13 10:36:28 +02002481 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482
Jan Ceuleers0977f812012-06-05 03:42:12 +00002483	/* If time stamping is requested, one additional TxBD must be set up. The
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002484 * first TxBD points to the FCB and must have a data length of
2485 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2486 * the full frame length.
2487 */
2488 if (unlikely(do_tstamp)) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002489 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2490
2491 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2492 bufaddr += fcb_len;
2493 lstatus_ts |= BD_LFLAG(TXBD_READY) |
2494 (skb_headlen(skb) - fcb_len);
2495
2496 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2497 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002498 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2499 } else {
2500 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2501 }
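	/* Resulting BD chain for a linear (nr_frags == 0) timestamped skb:
	 *   txbdp_start:  bufPtr -> FCB, data length GMAC_FCB_LEN
	 *   txbdp_tstamp: bufPtr -> frame payload, length headlen - fcb_len,
	 *                 flagged TXBD_LAST | TXBD_INTERRUPT (set earlier above)
	 */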
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002503 netdev_tx_sent_queue(txq, bytes_sent);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002504
Claudiu Manoild55398b2014-10-07 10:44:35 +03002505 gfar_wmb();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002506
Claudiu Manoila7312d52015-03-13 10:36:28 +02002507 txbdp_start->lstatus = cpu_to_be32(lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002508
Claudiu Manoild55398b2014-10-07 10:44:35 +03002509 gfar_wmb(); /* force lstatus write before tx_skbuff */
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002510
2511 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2512
Dai Haruki4669bc92008-12-17 16:51:04 -08002513 /* Update the current skb pointer to the next entry we will use
Jan Ceuleers0977f812012-06-05 03:42:12 +00002514 * (wrapping if necessary)
2515 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002516 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002517 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002518
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002519 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002520
Claudiu Manoilbc602282015-05-06 18:07:29 +03002521 /* We can work in parallel with gfar_clean_tx_ring(), except
2522 * when modifying num_txbdfree. Note that we didn't grab the lock
 2523	 * when we were reading num_txbdfree and checking for available
 2524	 * space; that's because outside of this function it can only grow.
2525 */
2526 spin_lock_bh(&tx_queue->txlock);
Dai Haruki4669bc92008-12-17 16:51:04 -08002527 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002528 tx_queue->num_txbdfree -= (nr_txbds);
Claudiu Manoilbc602282015-05-06 18:07:29 +03002529 spin_unlock_bh(&tx_queue->txlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530
2531 /* If the next BD still needs to be cleaned up, then the bds
Jan Ceuleers0977f812012-06-05 03:42:12 +00002532 * are full. We need to tell the kernel to stop sending us stuff.
2533 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002534 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002535 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002537 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 }
2539
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002541 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002543 return NETDEV_TX_OK;
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002544
2545dma_map_err:
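	/* Unwind path: walk the BDs that follow txbdp_start and unmap any
	 * fragment that was already mapped (still marked TXBD_READY),
	 * clearing the READY flag as we go.
	 */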
2546 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2547 if (do_tstamp)
2548 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2549 for (i = 0; i < nr_frags; i++) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002550 lstatus = be32_to_cpu(txbdp->lstatus);
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002551 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2552 break;
2553
Claudiu Manoila7312d52015-03-13 10:36:28 +02002554 lstatus &= ~BD_LFLAG(TXBD_READY);
2555 txbdp->lstatus = cpu_to_be32(lstatus);
2556 bufaddr = be32_to_cpu(txbdp->bufPtr);
2557 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002558 DMA_TO_DEVICE);
2559 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2560 }
2561 gfar_wmb();
2562 dev_kfree_skb_any(skb);
2563 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564}
2565
2566/* Stops the kernel queue, and halts the controller */
2567static int gfar_close(struct net_device *dev)
2568{
2569 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002570
Sebastian Siewiorab939902008-08-19 21:12:45 +02002571 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 stop_gfar(dev);
2573
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002574 /* Disconnect from the PHY */
2575 phy_disconnect(priv->phydev);
2576 priv->phydev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002578 gfar_free_irq(priv);
2579
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 return 0;
2581}
2582
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002584static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002586 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
2588 return 0;
2589}
2590
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2592{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002594 int frame_size = new_mtu + ETH_HLEN;
2595
Claudiu Manoil75354142015-07-13 16:22:06 +03002596 if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
Joe Perches59deab22011-06-14 08:57:47 +00002597 netif_err(priv, drv, dev, "Invalid MTU setting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 return -EINVAL;
2599 }
2600
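	/* GFAR_RESETTING serializes this MTU change against reset_gfar()
	 * and other reconfiguration paths; spin until we own the bit.
	 */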
Claudiu Manoil08511332014-02-24 12:13:45 +02002601 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2602 cpu_relax();
2603
Claudiu Manoil88302642014-02-24 12:13:43 +02002604 if (dev->flags & IFF_UP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 stop_gfar(dev);
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 dev->mtu = new_mtu;
2608
Claudiu Manoil88302642014-02-24 12:13:43 +02002609 if (dev->flags & IFF_UP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 startup_gfar(dev);
2611
Claudiu Manoil08511332014-02-24 12:13:45 +02002612 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2613
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 return 0;
2615}
2616
Claudiu Manoil08511332014-02-24 12:13:45 +02002617void reset_gfar(struct net_device *ndev)
2618{
2619 struct gfar_private *priv = netdev_priv(ndev);
2620
2621 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2622 cpu_relax();
2623
2624 stop_gfar(ndev);
2625 startup_gfar(ndev);
2626
2627 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2628}
2629
Sebastian Siewiorab939902008-08-19 21:12:45 +02002630/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 * transmitted after a set amount of time.
2632 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002633 * starting over will fix the problem.
2634 */
2635static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002637 struct gfar_private *priv = container_of(work, struct gfar_private,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002638 reset_task);
Claudiu Manoil08511332014-02-24 12:13:45 +02002639 reset_gfar(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640}
2641
Sebastian Siewiorab939902008-08-19 21:12:45 +02002642static void gfar_timeout(struct net_device *dev)
2643{
2644 struct gfar_private *priv = netdev_priv(dev);
2645
2646 dev->stats.tx_errors++;
2647 schedule_work(&priv->reset_task);
2648}
2649
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650/* Reclaim completed Tx descriptors and free their skbs (NAPI poll context) */
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002651static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002653 struct net_device *dev = tx_queue->dev;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002654 struct netdev_queue *txq;
Dai Harukid080cd62008-04-09 19:37:51 -05002655 struct gfar_private *priv = netdev_priv(dev);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002656 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002657 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002658 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002659 struct sk_buff *skb;
2660 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002661 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002662 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002663 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002664 int howmany = 0;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002665 int tqi = tx_queue->qindex;
2666 unsigned int bytes_sent = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002667 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002668 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002670 txq = netdev_get_tx_queue(dev, tqi);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002671 bdp = tx_queue->dirty_tx;
2672 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002673
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002674 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002675
Dai Haruki4669bc92008-12-17 16:51:04 -08002676 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002677
Jan Ceuleers0977f812012-06-05 03:42:12 +00002678 /* When time stamping, one additional TxBD must be freed.
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002679 * Also, we need to dma_unmap_single() the TxPAL.
2680 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002681 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002682 nr_txbds = frags + 2;
2683 else
2684 nr_txbds = frags + 1;
2685
2686 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002687
Claudiu Manoila7312d52015-03-13 10:36:28 +02002688 lstatus = be32_to_cpu(lbdp->lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002689
2690 /* Only clean completed frames */
2691 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002692 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 break;
2694
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002695 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002696 next = next_txbd(bdp, base, tx_ring_size);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002697 buflen = be16_to_cpu(next->length) +
2698 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002699		} else {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002700			buflen = be16_to_cpu(bdp->length);
		}
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002701
Claudiu Manoila7312d52015-03-13 10:36:28 +02002702 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002703 buflen, DMA_TO_DEVICE);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002704
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002705 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002706 struct skb_shared_hwtstamps shhwtstamps;
Scott Woodb4b67f22015-07-29 16:13:06 +03002707 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2708 ~0x7UL);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002709
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002710 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2711 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002712 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002713 skb_tstamp_tx(skb, &shhwtstamps);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002714 gfar_clear_txbd_status(bdp);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002715 bdp = next;
2716 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002717
Claudiu Manoila7312d52015-03-13 10:36:28 +02002718 gfar_clear_txbd_status(bdp);
Dai Haruki4669bc92008-12-17 16:51:04 -08002719 bdp = next_txbd(bdp, base, tx_ring_size);
2720
2721 for (i = 0; i < frags; i++) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002722 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2723 be16_to_cpu(bdp->length),
2724 DMA_TO_DEVICE);
2725 gfar_clear_txbd_status(bdp);
Dai Haruki4669bc92008-12-17 16:51:04 -08002726 bdp = next_txbd(bdp, base, tx_ring_size);
2727 }
2728
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002729 bytes_sent += GFAR_CB(skb)->bytes_sent;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002730
Eric Dumazetacb600d2012-10-05 06:23:55 +00002731 dev_kfree_skb_any(skb);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002732
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002733 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002734
2735 skb_dirtytx = (skb_dirtytx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002736 TX_RING_MOD_MASK(tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002737
Dai Harukid080cd62008-04-09 19:37:51 -05002738 howmany++;
Claudiu Manoilbc602282015-05-06 18:07:29 +03002739 spin_lock(&tx_queue->txlock);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002740 tx_queue->num_txbdfree += nr_txbds;
Claudiu Manoilbc602282015-05-06 18:07:29 +03002741 spin_unlock(&tx_queue->txlock);
Dai Haruki4669bc92008-12-17 16:51:04 -08002742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
Dai Haruki4669bc92008-12-17 16:51:04 -08002744 /* If we freed a buffer, we can restart transmission, if necessary */
Claudiu Manoil08511332014-02-24 12:13:45 +02002745 if (tx_queue->num_txbdfree &&
2746 netif_tx_queue_stopped(txq) &&
2747 !(test_bit(GFAR_DOWN, &priv->state)))
2748 netif_wake_subqueue(priv->ndev, tqi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Dai Haruki4669bc92008-12-17 16:51:04 -08002750 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002751 tx_queue->skb_dirtytx = skb_dirtytx;
2752 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002754 netdev_tx_completed_queue(txq, howmany, bytes_sent);
Dai Harukid080cd62008-04-09 19:37:51 -05002755}
2756
Claudiu Manoil75354142015-07-13 16:22:06 +03002757static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002758{
Claudiu Manoil75354142015-07-13 16:22:06 +03002759 struct page *page;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002760 dma_addr_t addr;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002761
Claudiu Manoil75354142015-07-13 16:22:06 +03002762 page = dev_alloc_page();
2763 if (unlikely(!page))
2764 return false;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002765
Claudiu Manoil75354142015-07-13 16:22:06 +03002766 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
2767 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
2768 __free_page(page);
Eran Libertyacbc0f02010-07-07 15:54:54 -07002769
Claudiu Manoil75354142015-07-13 16:22:06 +03002770 return false;
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002771 }
2772
Claudiu Manoil75354142015-07-13 16:22:06 +03002773 rxb->dma = addr;
2774 rxb->page = page;
2775 rxb->page_offset = 0;
2776
2777 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778}
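/* RX buffers are page based: each page is DMA-mapped once and carved into
 * two GFAR_RXB_TRUESIZE halves, with page_offset selecting the half that
 * is currently owned by the ring (see gfar_add_rx_frag() below).
 */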
2779
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002780static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
2781{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002782 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002783 struct gfar_extra_stats *estats = &priv->extra_stats;
2784
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002785 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002786 atomic64_inc(&estats->rx_alloc_err);
2787}
2788
2789static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
2790 int alloc_cnt)
2791{
Claudiu Manoil75354142015-07-13 16:22:06 +03002792 struct rxbd8 *bdp;
2793 struct gfar_rx_buff *rxb;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002794 int i;
2795
2796 i = rx_queue->next_to_use;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002797 bdp = &rx_queue->rx_bd_base[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002798 rxb = &rx_queue->rx_buff[i];
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002799
2800 while (alloc_cnt--) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002801		/* reuse the page if one is still attached, else allocate */
2802 if (unlikely(!rxb->page)) {
2803 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002804 gfar_rx_alloc_err(rx_queue);
2805 break;
2806 }
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002807 }
2808
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002809 /* Setup the new RxBD */
Claudiu Manoil75354142015-07-13 16:22:06 +03002810 gfar_init_rxbdp(rx_queue, bdp,
2811 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002812
2813 /* Update to the next pointer */
Claudiu Manoil75354142015-07-13 16:22:06 +03002814 bdp++;
2815 rxb++;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002816
Claudiu Manoil75354142015-07-13 16:22:06 +03002817 if (unlikely(++i == rx_queue->rx_ring_size)) {
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002818 i = 0;
Claudiu Manoil75354142015-07-13 16:22:06 +03002819 bdp = rx_queue->rx_bd_base;
2820 rxb = rx_queue->rx_buff;
2821 }
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002822 }
2823
2824 rx_queue->next_to_use = i;
Claudiu Manoil75354142015-07-13 16:22:06 +03002825 rx_queue->next_to_alloc = i;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002826}
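/* The BDs above are programmed at rxb->dma + page_offset + RXBUF_ALIGNMENT,
 * leaving RXBUF_ALIGNMENT bytes of headroom in each half page to match the
 * skb_reserve() done when the buffer is turned into an skb.
 */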
2827
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002828static void count_errors(u32 lstatus, struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002830 struct gfar_private *priv = netdev_priv(ndev);
2831 struct net_device_stats *stats = &ndev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 struct gfar_extra_stats *estats = &priv->extra_stats;
2833
Jan Ceuleers0977f812012-06-05 03:42:12 +00002834 /* If the packet was truncated, none of the other errors matter */
Claudiu Manoilf9660822015-07-13 16:22:04 +03002835 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 stats->rx_length_errors++;
2837
Paul Gortmaker212079d2013-02-12 15:38:19 -05002838 atomic64_inc(&estats->rx_trunc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839
2840 return;
2841 }
2842 /* Count the errors, if there were any */
Claudiu Manoilf9660822015-07-13 16:22:04 +03002843 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 stats->rx_length_errors++;
2845
Claudiu Manoilf9660822015-07-13 16:22:04 +03002846 if (lstatus & BD_LFLAG(RXBD_LARGE))
Paul Gortmaker212079d2013-02-12 15:38:19 -05002847 atomic64_inc(&estats->rx_large);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 else
Paul Gortmaker212079d2013-02-12 15:38:19 -05002849 atomic64_inc(&estats->rx_short);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002851 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 stats->rx_frame_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05002853 atomic64_inc(&estats->rx_nonoctet);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002855 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002856 atomic64_inc(&estats->rx_crcerr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 stats->rx_crc_errors++;
2858 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002859 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002860 atomic64_inc(&estats->rx_overrun);
Claudiu Manoilf9660822015-07-13 16:22:04 +03002861 stats->rx_over_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 }
2863}
2864
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002865irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866{
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02002867 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2868 unsigned long flags;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002869 u32 imask, ievent;
2870
2871 ievent = gfar_read(&grp->regs->ievent);
2872
2873 if (unlikely(ievent & IEVENT_FGPI)) {
2874 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2875 return IRQ_HANDLED;
2876 }
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02002877
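	/* NAPI hand-off: mask further RX interrupts in IMASK before
	 * scheduling the poll routine, which re-enables them by writing
	 * IMASK_RX_DEFAULT once the rings have been drained.
	 */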
2878 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2879 spin_lock_irqsave(&grp->grplock, flags);
2880 imask = gfar_read(&grp->regs->imask);
2881 imask &= IMASK_RX_DISABLED;
2882 gfar_write(&grp->regs->imask, imask);
2883 spin_unlock_irqrestore(&grp->grplock, flags);
2884 __napi_schedule(&grp->napi_rx);
2885 } else {
2886 /* Clear IEVENT, so interrupts aren't called again
2887 * because of the packets that have already arrived.
2888 */
2889 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2890 }
2891
2892 return IRQ_HANDLED;
2893}
2894
2895/* Interrupt Handler for Transmit complete */
2896static irqreturn_t gfar_transmit(int irq, void *grp_id)
2897{
2898 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2899 unsigned long flags;
2900 u32 imask;
2901
2902 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2903 spin_lock_irqsave(&grp->grplock, flags);
2904 imask = gfar_read(&grp->regs->imask);
2905 imask &= IMASK_TX_DISABLED;
2906 gfar_write(&grp->regs->imask, imask);
2907 spin_unlock_irqrestore(&grp->grplock, flags);
2908 __napi_schedule(&grp->napi_tx);
2909 } else {
2910 /* Clear IEVENT, so interrupts aren't called again
2911 * because of the packets that have already arrived.
2912 */
2913 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2914 }
2915
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 return IRQ_HANDLED;
2917}
2918
Claudiu Manoil75354142015-07-13 16:22:06 +03002919static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2920 struct sk_buff *skb, bool first)
2921{
2922 unsigned int size = lstatus & BD_LENGTH_MASK;
2923 struct page *page = rxb->page;
2924
2925 /* Remove the FCS from the packet length */
2926 if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
2927 size -= ETH_FCS_LEN;
2928
2929 if (likely(first))
2930 skb_put(skb, size);
2931 else
2932 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2933 rxb->page_offset + RXBUF_ALIGNMENT,
2934 size, GFAR_RXB_TRUESIZE);
2935
 2936	/* try to reuse the page */
2937 if (unlikely(page_count(page) != 1))
2938 return false;
2939
2940 /* change offset to the other half */
2941 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2942
2943 atomic_inc(&page->_count);
2944
2945 return true;
2946}
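/* A page half can be recycled only while the driver holds the sole page
 * reference (page_count == 1): the offset is flipped to the other half and
 * the reference count bumped for the half just handed to the stack.
 */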
2947
2948static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2949 struct gfar_rx_buff *old_rxb)
2950{
2951 struct gfar_rx_buff *new_rxb;
2952 u16 nta = rxq->next_to_alloc;
2953
2954 new_rxb = &rxq->rx_buff[nta];
2955
2956 /* find next buf that can reuse a page */
2957 nta++;
2958 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2959
2960 /* copy page reference */
2961 *new_rxb = *old_rxb;
2962
2963 /* sync for use by the device */
2964 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2965 old_rxb->page_offset,
2966 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2967}
2968
2969static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2970 u32 lstatus, struct sk_buff *skb)
2971{
2972 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2973 struct page *page = rxb->page;
2974 bool first = false;
2975
2976 if (likely(!skb)) {
2977 void *buff_addr = page_address(page) + rxb->page_offset;
2978
2979 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2980 if (unlikely(!skb)) {
2981 gfar_rx_alloc_err(rx_queue);
2982 return NULL;
2983 }
2984 skb_reserve(skb, RXBUF_ALIGNMENT);
2985 first = true;
2986 }
2987
2988 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2989 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2990
2991 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2992 /* reuse the free half of the page */
2993 gfar_reuse_rx_page(rx_queue, rxb);
2994 } else {
2995 /* page cannot be reused, unmap it */
2996 dma_unmap_page(rx_queue->dev, rxb->dma,
2997 PAGE_SIZE, DMA_FROM_DEVICE);
2998 }
2999
3000 /* clear rxb content */
3001 rxb->page = NULL;
3002
3003 return skb;
3004}
3005
Kumar Gala0bbaf062005-06-20 10:54:21 -05003006static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
3007{
3008 /* If valid headers were found, and valid sums
3009 * were verified, then we tell the kernel that no
Jan Ceuleers0977f812012-06-05 03:42:12 +00003010	 * checksumming is necessary. Otherwise, leave CHECKSUM_NONE so the stack verifies it
3011 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02003012 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
3013 (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05003014 skb->ip_summed = CHECKSUM_UNNECESSARY;
3015 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003016 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003017}
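/* RXFCB_CIP and RXFCB_CTU are taken here to mean that the controller
 * verified the IP header and TCP/UDP checksums, respectively; only when
 * both are set can software checksumming be skipped.
 */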
3018
Jan Ceuleers0977f812012-06-05 03:42:12 +00003019/* gfar_process_frame() -- strip the FCB/padding off one received frame and fill in its skb metadata */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003020static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003022 struct gfar_private *priv = netdev_priv(ndev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003023 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024
Dai Haruki2c2db482008-12-16 15:31:15 -08003025 /* fcb is at the beginning if exists */
3026 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
Jan Ceuleers0977f812012-06-05 03:42:12 +00003028 /* Remove the FCB from the skb
3029 * Remove the padded bytes, if there are any
3030 */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003031 if (priv->uses_rxfcb)
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003032 skb_pull(skb, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003033
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003034 /* Get receive timestamp from the skb */
3035 if (priv->hwts_rx_en) {
3036 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
3037 u64 *ns = (u64 *) skb->data;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003038
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003039 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
3040 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
3041 }
3042
3043 if (priv->padding)
3044 skb_pull(skb, priv->padding);
3045
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003046 if (ndev->features & NETIF_F_RXCSUM)
Dai Haruki2c2db482008-12-16 15:31:15 -08003047 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003048
Dai Haruki2c2db482008-12-16 15:31:15 -08003049 /* Tell the skb what kind of packet this is */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003050 skb->protocol = eth_type_trans(skb, ndev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003051
Patrick McHardyf6469682013-04-19 02:04:27 +00003052	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
David S. Miller823dcd22011-08-20 10:39:12 -07003053 * Even if vlan rx accel is disabled, on some chips
3054 * RXFCB_VLN is pseudo randomly set.
3055 */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003056 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
Claudiu Manoil26eb9372015-03-13 10:36:29 +02003057 be16_to_cpu(fcb->flags) & RXFCB_VLN)
3058 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3059 be16_to_cpu(fcb->vlctl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060}
3061
3062/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00003063 * until the budget/quota has been reached. Returns the number
3064 * of frames handled
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003066int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003068 struct net_device *ndev = rx_queue->ndev;
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003069 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil75354142015-07-13 16:22:06 +03003070 struct rxbd8 *bdp;
3071 int i, howmany = 0;
3072 struct sk_buff *skb = rx_queue->skb;
3073 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
3074 unsigned int total_bytes = 0, total_pkts = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075
3076 /* Get the first full descriptor */
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003077 i = rx_queue->next_to_clean;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003079 while (rx_work_limit--) {
Claudiu Manoilf9660822015-07-13 16:22:04 +03003080 u32 lstatus;
Dai Haruki2c2db482008-12-16 15:31:15 -08003081
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003082 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
3083 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3084 cleaned_cnt = 0;
3085 }
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003086
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003087 bdp = &rx_queue->rx_bd_base[i];
Claudiu Manoilf9660822015-07-13 16:22:04 +03003088 lstatus = be32_to_cpu(bdp->lstatus);
3089 if (lstatus & BD_LFLAG(RXBD_EMPTY))
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003090 break;
3091
3092 /* order rx buffer descriptor reads */
Scott Wood3b6330c2007-05-16 15:06:59 -05003093 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05003094
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003095 /* fetch next to clean buffer from the ring */
Claudiu Manoil75354142015-07-13 16:22:06 +03003096 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
3097 if (unlikely(!skb))
3098 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099
Claudiu Manoil75354142015-07-13 16:22:06 +03003100 cleaned_cnt++;
3101 howmany++;
Andy Fleming81183052008-11-12 10:07:11 -06003102
Claudiu Manoil75354142015-07-13 16:22:06 +03003103 if (unlikely(++i == rx_queue->rx_ring_size))
3104 i = 0;
Anton Vorontsov63b88b92010-06-11 10:51:03 +00003105
Claudiu Manoil75354142015-07-13 16:22:06 +03003106 rx_queue->next_to_clean = i;
3107
3108 /* fetch next buffer if not the last in frame */
3109 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
3110 continue;
3111
3112 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003113 count_errors(lstatus, ndev);
Andy Fleming815b97c2008-04-22 17:18:29 -05003114
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003115 /* discard faulty buffer */
3116 dev_kfree_skb(skb);
Claudiu Manoil75354142015-07-13 16:22:06 +03003117 skb = NULL;
3118 rx_queue->stats.rx_dropped++;
3119 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 }
3121
Claudiu Manoil75354142015-07-13 16:22:06 +03003122 /* Increment the number of packets */
3123 total_pkts++;
3124 total_bytes += skb->len;
3125
3126 skb_record_rx_queue(skb, rx_queue->qindex);
3127
3128 gfar_process_frame(ndev, skb);
3129
3130 /* Send the packet up the stack */
3131 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
3132
3133 skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134 }
3135
Claudiu Manoil75354142015-07-13 16:22:06 +03003136 /* Store incomplete frames for completion */
3137 rx_queue->skb = skb;
3138
3139 rx_queue->stats.rx_packets += total_pkts;
3140 rx_queue->stats.rx_bytes += total_bytes;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003141
3142 if (cleaned_cnt)
3143 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3144
3145 /* Update Last Free RxBD pointer for LFC */
3146 if (unlikely(priv->tx_actual_en)) {
Scott Woodb4b67f22015-07-29 16:13:06 +03003147 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3148
3149 gfar_write(rx_queue->rfbptr, bdp_dma);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 return howmany;
3153}
3154
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003155static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003156{
3157 struct gfar_priv_grp *gfargrp =
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003158 container_of(napi, struct gfar_priv_grp, napi_rx);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003159 struct gfar __iomem *regs = gfargrp->regs;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02003160 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003161 int work_done = 0;
3162
3163 /* Clear IEVENT, so interrupts aren't called again
3164 * because of the packets that have already arrived
3165 */
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003166 gfar_write(&regs->ievent, IEVENT_RX_MASK);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003167
3168 work_done = gfar_clean_rx_ring(rx_queue, budget);
3169
3170 if (work_done < budget) {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003171 u32 imask;
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003172 napi_complete(napi);
3173 /* Clear the halt bit in RSTAT */
3174 gfar_write(&regs->rstat, gfargrp->rstat);
3175
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003176 spin_lock_irq(&gfargrp->grplock);
3177 imask = gfar_read(&regs->imask);
3178 imask |= IMASK_RX_DEFAULT;
3179 gfar_write(&regs->imask, imask);
3180 spin_unlock_irq(&gfargrp->grplock);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003181 }
3182
3183 return work_done;
3184}
3185
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003186static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187{
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003188 struct gfar_priv_grp *gfargrp =
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003189 container_of(napi, struct gfar_priv_grp, napi_tx);
3190 struct gfar __iomem *regs = gfargrp->regs;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02003191 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003192 u32 imask;
3193
3194 /* Clear IEVENT, so interrupts aren't called again
3195 * because of the packets that have already arrived
3196 */
3197 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3198
3199 /* run Tx cleanup to completion */
3200 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
3201 gfar_clean_tx_ring(tx_queue);
3202
3203 napi_complete(napi);
3204
3205 spin_lock_irq(&gfargrp->grplock);
3206 imask = gfar_read(&regs->imask);
3207 imask |= IMASK_TX_DEFAULT;
3208 gfar_write(&regs->imask, imask);
3209 spin_unlock_irq(&gfargrp->grplock);
3210
3211 return 0;
3212}
3213
3214static int gfar_poll_rx(struct napi_struct *napi, int budget)
3215{
3216 struct gfar_priv_grp *gfargrp =
3217 container_of(napi, struct gfar_priv_grp, napi_rx);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003218 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003219 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003220 struct gfar_priv_rx_q *rx_queue = NULL;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003221 int work_done = 0, work_done_per_q = 0;
Claudiu Manoil39c0a0d2013-03-21 03:12:13 +00003222 int i, budget_per_q = 0;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003223 unsigned long rstat_rxf;
3224 int num_act_queues;
Dai Harukid080cd62008-04-09 19:37:51 -05003225
Dai Haruki8c7396a2008-12-17 16:52:00 -08003226 /* Clear IEVENT, so interrupts aren't called again
Jan Ceuleers0977f812012-06-05 03:42:12 +00003227 * because of the packets that have already arrived
3228 */
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003229 gfar_write(&regs->ievent, IEVENT_RX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08003230
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003231 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3232
3233 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3234 if (num_act_queues)
3235 budget_per_q = budget/num_act_queues;
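	/* e.g. a budget of 64 split over two active queues lets each
	 * queue clean at most 32 frames in this poll round
	 */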
3236
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003237 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3238 /* skip queue if not active */
3239 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3240 continue;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003241
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003242 rx_queue = priv->rx_queue[i];
3243 work_done_per_q =
3244 gfar_clean_rx_ring(rx_queue, budget_per_q);
3245 work_done += work_done_per_q;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003246
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003247 /* finished processing this queue */
3248 if (work_done_per_q < budget_per_q) {
3249 /* clear active queue hw indication */
3250 gfar_write(&regs->rstat,
3251 RSTAT_CLEAR_RXF0 >> i);
3252 num_act_queues--;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003253
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003254 if (!num_act_queues)
3255 break;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003256 }
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003257 }
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003258
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003259 if (!num_act_queues) {
3260 u32 imask;
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003261 napi_complete(napi);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003262
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003263 /* Clear the halt bit in RSTAT */
3264 gfar_write(&regs->rstat, gfargrp->rstat);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003265
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003266 spin_lock_irq(&gfargrp->grplock);
3267 imask = gfar_read(&regs->imask);
3268 imask |= IMASK_RX_DEFAULT;
3269 gfar_write(&regs->imask, imask);
3270 spin_unlock_irq(&gfargrp->grplock);
Dai Harukid080cd62008-04-09 19:37:51 -05003271 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003273 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003276static int gfar_poll_tx(struct napi_struct *napi, int budget)
3277{
3278 struct gfar_priv_grp *gfargrp =
3279 container_of(napi, struct gfar_priv_grp, napi_tx);
3280 struct gfar_private *priv = gfargrp->priv;
3281 struct gfar __iomem *regs = gfargrp->regs;
3282 struct gfar_priv_tx_q *tx_queue = NULL;
3283 int has_tx_work = 0;
3284 int i;
3285
3286 /* Clear IEVENT, so interrupts aren't called again
3287 * because of the packets that have already arrived
3288 */
3289 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3290
3291 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3292 tx_queue = priv->tx_queue[i];
3293 /* run Tx cleanup to completion */
3294 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3295 gfar_clean_tx_ring(tx_queue);
3296 has_tx_work = 1;
3297 }
3298 }
3299
3300 if (!has_tx_work) {
3301 u32 imask;
3302 napi_complete(napi);
3303
3304 spin_lock_irq(&gfargrp->grplock);
3305 imask = gfar_read(&regs->imask);
3306 imask |= IMASK_TX_DEFAULT;
3307 gfar_write(&regs->imask, imask);
3308 spin_unlock_irq(&gfargrp->grplock);
3309 }
3310
3311 return 0;
3312}
3313
3314
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003315#ifdef CONFIG_NET_POLL_CONTROLLER
Jan Ceuleers0977f812012-06-05 03:42:12 +00003316/* Polling 'interrupt' - used by things like netconsole to send skbs
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003317 * without having to re-enable interrupts. It's not called while
3318 * the interrupt routine is executing.
3319 */
3320static void gfar_netpoll(struct net_device *dev)
3321{
3322 struct gfar_private *priv = netdev_priv(dev);
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00003323 int i;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003324
3325 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003326 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003327 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003328 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3329
3330 disable_irq(gfar_irq(grp, TX)->irq);
3331 disable_irq(gfar_irq(grp, RX)->irq);
3332 disable_irq(gfar_irq(grp, ER)->irq);
3333 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3334 enable_irq(gfar_irq(grp, ER)->irq);
3335 enable_irq(gfar_irq(grp, RX)->irq);
3336 enable_irq(gfar_irq(grp, TX)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003337 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003338 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003339 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003340 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3341
3342 disable_irq(gfar_irq(grp, TX)->irq);
3343 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3344 enable_irq(gfar_irq(grp, TX)->irq);
Anton Vorontsov43de0042009-12-09 02:52:19 -08003345 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003346 }
3347}
3348#endif
3349
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003351static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003353 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
3355 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003356 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003359 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003360 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361
3362 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003363 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003364 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003366 /* Check for errors */
3367 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003368 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369
3370 return IRQ_HANDLED;
3371}
3372
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373/* Called every time the controller might need to be made
3374 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003375 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 * function converts those variables into the appropriate
3377 * register values, and can bring down the device if needed.
3378 */
3379static void adjust_link(struct net_device *dev)
3380{
3381 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003382 struct phy_device *phydev = priv->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003384 if (unlikely(phydev->link != priv->oldlink ||
Guenter Roeck0ae93b22015-03-02 12:03:27 -08003385 (phydev->link && (phydev->duplex != priv->oldduplex ||
3386 phydev->speed != priv->oldspeed))))
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003387 gfar_update_link_state(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003388}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389
3390/* Update the hash table based on the current list of multicast
3391 * addresses we subscribe to. Also, change the promiscuity of
3392 * the device based on the flags (this function is called
Jan Ceuleers0977f812012-06-05 03:42:12 +00003393 * whenever dev->flags is changed)
3394 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395static void gfar_set_multi(struct net_device *dev)
3396{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003397 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003399 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 u32 tempval;
3401
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003402 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 /* Set RCTRL to PROM */
3404 tempval = gfar_read(&regs->rctrl);
3405 tempval |= RCTRL_PROM;
3406 gfar_write(&regs->rctrl, tempval);
3407 } else {
3408 /* Set RCTRL to not PROM */
3409 tempval = gfar_read(&regs->rctrl);
3410 tempval &= ~(RCTRL_PROM);
3411 gfar_write(&regs->rctrl, tempval);
3412 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003413
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003414 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003416 gfar_write(&regs->igaddr0, 0xffffffff);
3417 gfar_write(&regs->igaddr1, 0xffffffff);
3418 gfar_write(&regs->igaddr2, 0xffffffff);
3419 gfar_write(&regs->igaddr3, 0xffffffff);
3420 gfar_write(&regs->igaddr4, 0xffffffff);
3421 gfar_write(&regs->igaddr5, 0xffffffff);
3422 gfar_write(&regs->igaddr6, 0xffffffff);
3423 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 gfar_write(&regs->gaddr0, 0xffffffff);
3425 gfar_write(&regs->gaddr1, 0xffffffff);
3426 gfar_write(&regs->gaddr2, 0xffffffff);
3427 gfar_write(&regs->gaddr3, 0xffffffff);
3428 gfar_write(&regs->gaddr4, 0xffffffff);
3429 gfar_write(&regs->gaddr5, 0xffffffff);
3430 gfar_write(&regs->gaddr6, 0xffffffff);
3431 gfar_write(&regs->gaddr7, 0xffffffff);
3432 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003433 int em_num;
3434 int idx;
3435
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003437 gfar_write(&regs->igaddr0, 0x0);
3438 gfar_write(&regs->igaddr1, 0x0);
3439 gfar_write(&regs->igaddr2, 0x0);
3440 gfar_write(&regs->igaddr3, 0x0);
3441 gfar_write(&regs->igaddr4, 0x0);
3442 gfar_write(&regs->igaddr5, 0x0);
3443 gfar_write(&regs->igaddr6, 0x0);
3444 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 gfar_write(&regs->gaddr0, 0x0);
3446 gfar_write(&regs->gaddr1, 0x0);
3447 gfar_write(&regs->gaddr2, 0x0);
3448 gfar_write(&regs->gaddr3, 0x0);
3449 gfar_write(&regs->gaddr4, 0x0);
3450 gfar_write(&regs->gaddr5, 0x0);
3451 gfar_write(&regs->gaddr6, 0x0);
3452 gfar_write(&regs->gaddr7, 0x0);
3453
Andy Fleming7f7f5312005-11-11 12:38:59 -06003454 /* If we have extended hash tables, we need to
3455 * clear the exact match registers to prepare for
Jan Ceuleers0977f812012-06-05 03:42:12 +00003456 * setting them
3457 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003458 if (priv->extended_hash) {
3459 em_num = GFAR_EM_NUM + 1;
3460 gfar_clear_exact_match(dev);
3461 idx = 1;
3462 } else {
3463 idx = 0;
3464 em_num = 0;
3465 }
3466
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003467 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 return;
3469
3470 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003471 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003472 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003473 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003474 idx++;
 3475			} else {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003476				gfar_set_hash_for_addr(dev, ha->addr);
			}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 }
3478 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479}
3480
Andy Fleming7f7f5312005-11-11 12:38:59 -06003481
3482/* Clears each of the exact match registers to zero, so they
Jan Ceuleers0977f812012-06-05 03:42:12 +00003483 * don't interfere with normal reception
3484 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003485static void gfar_clear_exact_match(struct net_device *dev)
3486{
3487 int idx;
Joe Perches6a3c910c2011-11-16 09:38:02 +00003488 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
Andy Fleming7f7f5312005-11-11 12:38:59 -06003489
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003490 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
Joe Perchesb6bc7652010-12-21 02:16:08 -08003491 gfar_set_mac_for_addr(dev, idx, zero_arr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003492}
3493
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494/* Set the appropriate hash bit for the given addr */
3495/* The algorithm works like so:
3496 * 1) Take the Destination Address (ie the multicast address), and
3497 * do a CRC on it (little endian), and reverse the bits of the
3498 * result.
3499 * 2) Use the 8 most significant bits as a hash into a 256-entry
3500 * table. The table is controlled through 8 32-bit registers:
3501 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 3502 * entry 255. This means that the 3 most significant bits of the
 3503 * hash index select which gaddr register to use, and the 5 other bits
3504 * indicate which bit (assuming an IBM numbering scheme, which
3505 * for PowerPC (tm) is usually the case) in the register holds
Jan Ceuleers0977f812012-06-05 03:42:12 +00003506 * the entry.
3507 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3509{
3510 u32 tempval;
3511 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c910c2011-11-16 09:38:02 +00003512 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003513 int width = priv->hash_width;
3514 u8 whichbit = (result >> (32 - width)) & 0x1f;
3515 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516 u32 value = (1 << (31-whichbit));
3517
Kumar Gala0bbaf062005-06-20 10:54:21 -05003518 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003520 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521}
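
/* Worked example of the arithmetic above, an illustrative sketch assuming
 * hash_width == 8 (the gaddr-only, 256-entry case the comment describes):
 * for a CRC result of 0xABCDEF01 the top eight bits are 0xAB = 0b10101011,
 * so
 *
 *	whichreg = 0xABCDEF01 >> 29          = 0b101   = 5   (gaddr5)
 *	whichbit = (0xABCDEF01 >> 24) & 0x1f = 0b01011 = 11
 *	value    = 1 << (31 - 11)            = 0x00100000
 *
 * i.e. bit 11 of gaddr5 (IBM numbering, where bit 0 is the MSB) is set.
 */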

/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the num'th pair to a given address.
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num * 2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr + 1, tempval);
}
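
/* Making the example in the comment above concrete: in transmission order
 * addr[0] is 0x12 and addr[5] is 0xCD, so the first write packs the last
 * four bytes reversed,
 *
 *	(0xCD << 24) | (0xAB << 16) | (0x78 << 8) | 0x56 = 0xCDAB7856,
 *
 * and the second packs the remaining two,
 *
 *	(0x34 << 24) | (0x12 << 16) = 0x34120000.
 */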

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Log the raw event and mask state if error debugging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = 0;
		if (phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
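
/* For reference, mii_resolve_flowctrl_fdx() applies the IEEE 802.3
 * full-duplex pause resolution rules. Two illustrative outcomes: if both
 * ends advertise symmetric pause (ADVERTISE_PAUSE_CAP locally and
 * LPA_PAUSE_CAP remotely), it returns FLOW_CTRL_TX | FLOW_CTRL_RX and
 * both directions are enabled in MACCFG1; if both ends set only the
 * asymmetric bit and just the remote end also sets the pause bit, it
 * returns FLOW_CTRL_TX, so this MAC may send pause frames but ignores
 * received ones.
 */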

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		/* MACCFG1_TX_FLOW lives in maccfg1, so record the old TX
		 * flow state from tempval1, not the maccfg2 value
		 */
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}

static const struct of_device_id gfar_match[] = {
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
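
/* An illustrative device tree node the first match entry above would bind
 * to; the property values are assumptions patterned on 85xx board trees,
 * not a definitive binding:
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *	};
 */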

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);