/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or an amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

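/* Initialize a single Rx buffer descriptor: point it at buf, mark it
 * empty for the controller, and set the wrap bit if it is the last BD
 * in the ring.
 */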
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

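/* Bring the Tx and Rx descriptor rings to their initial state: clean
 * Tx BDs with the wrap bit on the last one, and Rx rings refilled with
 * fresh buffers.
 */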
static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

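/* Allocate a single DMA-coherent region for all Tx and Rx descriptor
 * rings, plus the per-ring skb/buffer bookkeeping arrays, then hand the
 * rings over to gfar_init_bds().
 */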
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

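/* Program the DMA base address registers of each Tx/Rx descriptor ring */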
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

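/* Program the per-queue flow control thresholds (rqprm registers) */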
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

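/* Decide whether received frames carry a Rx frame control block (FCB),
 * which is the case whenever any Rx hw offload is in use.
 */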
static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

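/* Build and program RCTRL: filer, promiscuity, checksumming, padding,
 * timestamping and VLAN extraction, plus the flow control setup.
 */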
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

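/* Build and program TCTRL: Tx checksumming, scheduling mode and
 * VLAN insertion.
 */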
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

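/* Program the interrupt coalescing registers of the queues selected by
 * tx_mask/rx_mask; non-MQ_MG devices only have a single txic/rxic pair.
 */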
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

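/* Fold the per-queue counters into the aggregate netdev stats */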
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

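/* Set up one interrupt group: map its registers, parse its interrupts,
 * read the Rx/Tx queue bit maps from the device tree node, and assign
 * the corresponding queues to the group.
 */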
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}

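/* Parse the device tree node: work out the queue/group layout, allocate
 * the net_device and its queues, and read the MAC address, stashing and
 * PHY related properties.
 */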
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	struct property *stash;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_find_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id. The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

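/* SIOCSHWTSTAMP handler: switch hardware Tx/Rx timestamping on or off,
 * resetting the controller when the Rx setting changes.
 */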
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

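/* Write the four filer entries that form one traffic class cluster,
 * working backwards from rqfar; returns the next free filer index.
 */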
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

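/* Soft-reset the MAC and reapply the current configuration: frame
 * length limits, address hash registers, Rx/Tx MAC config and
 * interrupt coalescing.
 */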
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

1217static void gfar_hw_init(struct gfar_private *priv)
1218{
1219 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1220 u32 attrs;
1221
1222 /* Stop the DMA engine now, in case it was running before
1223 * (The firmware could have used it, and left it running).
1224 */
1225 gfar_halt(priv);
1226
1227 gfar_mac_reset(priv);
1228
1229 /* Zero out the RMON MIB registers if the device has them */
1230 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
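		/* The MIB counters live in MMIO space, hence memset_io() */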
1231 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1232
1233 /* Mask off the CAM interrupts */
1234 gfar_write(&regs->rmon.cam1, 0xffffffff);
1235 gfar_write(&regs->rmon.cam2, 0xffffffff);
1236 }
1237
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001239 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
Claudiu Manoil34018fd2014-02-17 12:53:15 +02001241 /* Set the extraction length and index */
1242 attrs = ATTRELI_EL(priv->rx_stash_size) |
1243 ATTRELI_EI(priv->rx_stash_index);
1244
1245 gfar_write(&regs->attreli, attrs);
1246
1247 /* Start with defaults, and add stashing
1248 * depending on driver parameters
1249 */
1250 attrs = ATTR_INIT_SETTINGS;
1251
1252 if (priv->bd_stash_en)
1253 attrs |= ATTR_BDSTASH;
1254
1255 if (priv->rx_stash_size != 0)
1256 attrs |= ATTR_BUFSTASH;
1257
1258 gfar_write(&regs->attr, attrs);
1259
1260 /* FIFO configs */
1261 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1262 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1263 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1264
Claudiu Manoil20862782014-02-17 12:53:14 +02001265 /* Program the interrupt steering regs, only for MG devices */
1266 if (priv->num_grps > 1)
1267 gfar_write_isrg(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001268}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269
Xiubo Li898157e2014-06-04 16:49:16 +08001270static void gfar_init_addr_hash_table(struct gfar_private *priv)
Claudiu Manoil20862782014-02-17 12:53:14 +02001271{
1272 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001273
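	/* The hash table spans either 16 or 8 32-bit registers, i.e.
	 * 512 bins (9-bit hash width) or 256 bins (8-bit hash width).
	 */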
Andy Flemingb31a1d82008-12-16 15:29:15 -08001274 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001275 priv->extended_hash = 1;
1276 priv->hash_width = 9;
1277
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001278 priv->hash_regs[0] = &regs->igaddr0;
1279 priv->hash_regs[1] = &regs->igaddr1;
1280 priv->hash_regs[2] = &regs->igaddr2;
1281 priv->hash_regs[3] = &regs->igaddr3;
1282 priv->hash_regs[4] = &regs->igaddr4;
1283 priv->hash_regs[5] = &regs->igaddr5;
1284 priv->hash_regs[6] = &regs->igaddr6;
1285 priv->hash_regs[7] = &regs->igaddr7;
1286 priv->hash_regs[8] = &regs->gaddr0;
1287 priv->hash_regs[9] = &regs->gaddr1;
1288 priv->hash_regs[10] = &regs->gaddr2;
1289 priv->hash_regs[11] = &regs->gaddr3;
1290 priv->hash_regs[12] = &regs->gaddr4;
1291 priv->hash_regs[13] = &regs->gaddr5;
1292 priv->hash_regs[14] = &regs->gaddr6;
1293 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001294
1295 } else {
1296 priv->extended_hash = 0;
1297 priv->hash_width = 8;
1298
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001299 priv->hash_regs[0] = &regs->gaddr0;
1300 priv->hash_regs[1] = &regs->gaddr1;
1301 priv->hash_regs[2] = &regs->gaddr2;
1302 priv->hash_regs[3] = &regs->gaddr3;
1303 priv->hash_regs[4] = &regs->gaddr4;
1304 priv->hash_regs[5] = &regs->gaddr5;
1305 priv->hash_regs[6] = &regs->gaddr6;
1306 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001307 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001308}
1309
1310/* Set up the ethernet device structure, private data,
1311 * and anything else we need before we start
1312 */
1313static int gfar_probe(struct platform_device *ofdev)
1314{
1315 struct net_device *dev = NULL;
1316 struct gfar_private *priv = NULL;
1317 int err = 0, i;
1318
1319 err = gfar_of_init(ofdev, &dev);
1320
1321 if (err)
1322 return err;
1323
1324 priv = netdev_priv(dev);
1325 priv->ndev = dev;
1326 priv->ofdev = ofdev;
1327 priv->dev = &ofdev->dev;
1328 SET_NETDEV_DEV(dev, &ofdev->dev);
1329
Claudiu Manoil20862782014-02-17 12:53:14 +02001330 INIT_WORK(&priv->reset_task, gfar_reset_task);
1331
1332 platform_set_drvdata(ofdev, priv);
1333
1334 gfar_detect_errata(priv);
1335
Claudiu Manoil20862782014-02-17 12:53:14 +02001336 /* Set the dev->base_addr to the gfar reg region */
1337 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1338
1339 /* Fill in the dev structure */
1340 dev->watchdog_timeo = TX_TIMEOUT;
1341 dev->mtu = 1500;
1342 dev->netdev_ops = &gfar_netdev_ops;
1343 dev->ethtool_ops = &gfar_ethtool_ops;
1344
1345 /* Register NAPI: one Rx and one Tx context per interrupt group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001346 for (i = 0; i < priv->num_grps; i++) {
1347 if (priv->poll_mode == GFAR_SQ_POLLING) {
1348 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1349 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001350 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001351 gfar_poll_tx_sq, 2);
1352 } else {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001353 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1354 gfar_poll_rx, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001355 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001356 gfar_poll_tx, 2);
1357 }
1358 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001359
1360 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1361 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1362 NETIF_F_RXCSUM;
1363 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1364 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1365 }
1366
1367 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1368 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1369 NETIF_F_HW_VLAN_CTAG_RX;
1370 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1371 }
1372
Claudiu Manoil3d23a052015-05-06 18:07:30 +03001373 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1374
Claudiu Manoil20862782014-02-17 12:53:14 +02001375 gfar_init_addr_hash_table(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001376
Claudiu Manoil532c37b2014-02-17 12:53:16 +02001377 /* Insert receive time stamps into padding alignment bytes */
1378 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1379 priv->padding = 8;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001380
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001381 if (dev->features & NETIF_F_IP_CSUM ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001382 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001383 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001385 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001386 for (i = 0; i < priv->num_tx_queues; i++) {
1387 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1388 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1389 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1390 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1391 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001392
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001393 for (i = 0; i < priv->num_rx_queues; i++) {
1394 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1395 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1396 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1397 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Jan Ceuleers0977f812012-06-05 03:42:12 +00001399 /* always enable rx filer */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001400 priv->rx_filer_enable = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001401 /* Enable most messages by default */
1402 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +00001403 /* use priority h/w tx queue scheduling for single queue devices */
1404 if (priv->num_tx_queues == 1)
1405 priv->prio_sched_en = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001406
Claudiu Manoil08511332014-02-24 12:13:45 +02001407 set_bit(GFAR_DOWN, &priv->state);
1408
Claudiu Manoila328ac92014-02-24 12:13:42 +02001409 gfar_hw_init(priv);
Trent Piephod3eab822008-10-02 11:12:24 +00001410
Fabio Estevamd4c642e2014-06-03 19:55:38 -03001411 /* Carrier starts down, phylib will bring it up */
1412 netif_carrier_off(dev);
1413
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 err = register_netdev(dev);
1415
1416 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001417 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 goto register_fail;
1419 }
1420
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001421 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1422 priv->wol_supported |= GFAR_WOL_MAGIC;
1423
1424 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1425 priv->rx_filer_enable)
1426 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1427
1428 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001429
Dai Harukic50a5d92008-12-17 16:51:32 -08001430 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001431 for (i = 0; i < priv->num_grps; i++) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001432 struct gfar_priv_grp *grp = &priv->gfargrp[i];
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001433 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001434 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001435 dev->name, "_g", '0' + i, "_tx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001436 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001437 dev->name, "_g", '0' + i, "_rx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001438 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001439 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001440 } else
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001441 strcpy(gfar_irq(grp, TX)->name, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001442 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001443
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001444 /* Initialize the filer table */
1445 gfar_init_filer_table(priv);
1446
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001448 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Jan Ceuleers0977f812012-06-05 03:42:12 +00001450 /* Even more device info helps when determining which kernel
1451 * provided which set of benchmarks.
1452 */
Joe Perches59deab22011-06-14 08:57:47 +00001453 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001454 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001455 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1456 i, priv->rx_queue[i]->rx_ring_size);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001457 for (i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001458 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1459 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460
1461 return 0;
1462
1463register_fail:
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001464 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001465 gfar_free_rx_queues(priv);
1466 gfar_free_tx_queues(priv);
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001467 of_node_put(priv->phy_node);
1468 of_node_put(priv->tbi_node);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001469 free_gfar_dev(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001470 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471}
1472
Grant Likely2dc11582010-08-06 09:25:50 -06001473static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001475 struct gfar_private *priv = platform_get_drvdata(ofdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001477 of_node_put(priv->phy_node);
1478 of_node_put(priv->tbi_node);
Grant Likelyfe192a42009-04-25 12:53:12 +00001479
David S. Millerd9d8e042009-09-06 01:41:02 -07001480 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001481 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001482 gfar_free_rx_queues(priv);
1483 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001484 free_gfar_dev(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
1486 return 0;
1487}
1488
Scott Woodd87eb122008-07-11 18:04:45 -05001489#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001490
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001491static void __gfar_filer_disable(struct gfar_private *priv)
1492{
1493 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1494 u32 temp;
1495
1496 temp = gfar_read(&regs->rctrl);
1497 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1498 gfar_write(&regs->rctrl, temp);
1499}
1500
1501static void __gfar_filer_enable(struct gfar_private *priv)
1502{
1503 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1504 u32 temp;
1505
1506 temp = gfar_read(&regs->rctrl);
1507 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1508 gfar_write(&regs->rctrl, temp);
1509}
1510
1511/* Filer rules implementing wol capabilities */
1512static void gfar_filer_config_wol(struct gfar_private *priv)
1513{
1514 unsigned int i;
1515 u32 rqfcr;
1516
1517 __gfar_filer_disable(priv);
1518
1519 /* clear the filer table, reject any packet by default */
1520 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1521 for (i = 0; i <= MAX_FILER_IDX; i++)
1522 gfar_write_filer(priv, i, rqfcr, 0);
1523
1524 i = 0;
1525 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1526 /* unicast packet, accept it */
1527 struct net_device *ndev = priv->ndev;
1528 /* get the default rx queue index */
1529 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1530 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1531 (ndev->dev_addr[1] << 8) |
1532 ndev->dev_addr[2];
1533
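		/* Match the full 48-bit station address with two chained
		 * rules: DAH compares the upper 24 bits (AND-linked to the
		 * next rule), DAL the lower 24 bits. The GPI flag on the
		 * final rule raises the filer general purpose interrupt
		 * that serves as the wake event.
		 */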
1534 rqfcr = (qindex << 10) | RQFCR_AND |
1535 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1536
1537 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1538
1539 dest_mac_addr = (ndev->dev_addr[3] << 16) |
1540 (ndev->dev_addr[4] << 8) |
1541 ndev->dev_addr[5];
1542 rqfcr = (qindex << 10) | RQFCR_GPI |
1543 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1544 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1545 }
1546
1547 __gfar_filer_enable(priv);
1548}
1549
1550static void gfar_filer_restore_table(struct gfar_private *priv)
1551{
1552 u32 rqfcr, rqfpr;
1553 unsigned int i;
1554
1555 __gfar_filer_disable(priv);
1556
1557 for (i = 0; i <= MAX_FILER_IDX; i++) {
1558 rqfcr = priv->ftp_rqfcr[i];
1559 rqfpr = priv->ftp_rqfpr[i];
1560 gfar_write_filer(priv, i, rqfcr, rqfpr);
1561 }
1562
1563 __gfar_filer_enable(priv);
1564}
1565
1566/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1567static void gfar_start_wol_filer(struct gfar_private *priv)
1568{
1569 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1570 u32 tempval;
1571 int i = 0;
1572
1573 /* Enable Rx hw queues */
1574 gfar_write(&regs->rqueue, priv->rqueue);
1575
1576 /* Initialize DMACTRL to have WWR and WOP */
1577 tempval = gfar_read(&regs->dmactrl);
1578 tempval |= DMACTRL_INIT_SETTINGS;
1579 gfar_write(&regs->dmactrl, tempval);
1580
1581 /* Make sure we aren't stopped */
1582 tempval = gfar_read(&regs->dmactrl);
1583 tempval &= ~DMACTRL_GRS;
1584 gfar_write(&regs->dmactrl, tempval);
1585
1586 for (i = 0; i < priv->num_grps; i++) {
1587 regs = priv->gfargrp[i].regs;
1588 /* Clear RHLT, so that the DMA starts polling now */
1589 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1590 /* enable the Filer General Purpose Interrupt */
1591 gfar_write(&regs->imask, IMASK_FGPI);
1592 }
1593
1594 /* Enable Rx DMA */
1595 tempval = gfar_read(&regs->maccfg1);
1596 tempval |= MACCFG1_RX_EN;
1597 gfar_write(&regs->maccfg1, tempval);
1598}
1599
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001600static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001601{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001602 struct gfar_private *priv = dev_get_drvdata(dev);
1603 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001604 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001605 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001606 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001607
Claudiu Manoil614b4242015-07-31 18:38:32 +03001608 if (!netif_running(ndev))
1609 return 0;
1610
1611 disable_napi(priv);
1612 netif_tx_lock(ndev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001613 netif_device_detach(ndev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001614 netif_tx_unlock(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001615
Claudiu Manoil614b4242015-07-31 18:38:32 +03001616 gfar_halt(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001617
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001618 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001619 /* Enable interrupt on Magic Packet */
1620 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001621
Claudiu Manoil614b4242015-07-31 18:38:32 +03001622 /* Enable Magic Packet mode */
1623 tempval = gfar_read(&regs->maccfg2);
1624 tempval |= MACCFG2_MPEN;
1625 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001626
Claudiu Manoil614b4242015-07-31 18:38:32 +03001627 /* re-enable the Rx block */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001628 tempval = gfar_read(&regs->maccfg1);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001629 tempval |= MACCFG1_RX_EN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001630 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001631
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001632 } else if (wol & GFAR_WOL_FILER_UCAST) {
1633 gfar_filer_config_wol(priv);
1634 gfar_start_wol_filer(priv);
1635
Claudiu Manoil614b4242015-07-31 18:38:32 +03001636 } else {
1637 phy_stop(priv->phydev);
Scott Woodd87eb122008-07-11 18:04:45 -05001638 }
1639
1640 return 0;
1641}
1642
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001643static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001644{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001645 struct gfar_private *priv = dev_get_drvdata(dev);
1646 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001647 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001648 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001649 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001650
Claudiu Manoil614b4242015-07-31 18:38:32 +03001651 if (!netif_running(ndev))
Scott Woodd87eb122008-07-11 18:04:45 -05001652 return 0;
Scott Woodd87eb122008-07-11 18:04:45 -05001653
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001654 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001655 /* Disable Magic Packet mode */
1656 tempval = gfar_read(&regs->maccfg2);
1657 tempval &= ~MACCFG2_MPEN;
1658 gfar_write(&regs->maccfg2, tempval);
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001659
1660 } else if (wol & GFAR_WOL_FILER_UCAST) {
1661 /* need to stop rx only, tx is already down */
1662 gfar_halt(priv);
1663 gfar_filer_restore_table(priv);
1664
Claudiu Manoil614b4242015-07-31 18:38:32 +03001665 } else {
Scott Woodd87eb122008-07-11 18:04:45 -05001666 phy_start(priv->phydev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001667 }
Scott Woodd87eb122008-07-11 18:04:45 -05001668
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001669 gfar_start(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001670
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001671 netif_device_attach(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001672 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001673
1674 return 0;
1675}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001676
1677static int gfar_restore(struct device *dev)
1678{
1679 struct gfar_private *priv = dev_get_drvdata(dev);
1680 struct net_device *ndev = priv->ndev;
1681
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001682 if (!netif_running(ndev)) {
1683 netif_device_attach(ndev);
1684
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001685 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001686 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001687
Claudiu Manoil76f31e82015-07-13 16:22:03 +03001688 gfar_init_bds(ndev);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001689
Claudiu Manoila328ac92014-02-24 12:13:42 +02001690 gfar_mac_reset(priv);
1691
1692 gfar_init_tx_rx_base(priv);
1693
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001694 gfar_start(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001695
1696 priv->oldlink = 0;
1697 priv->oldspeed = 0;
1698 priv->oldduplex = -1;
1699
1700 if (priv->phydev)
1701 phy_start(priv->phydev);
1702
1703 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001704 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001705
1706 return 0;
1707}
1708
1709static struct dev_pm_ops gfar_pm_ops = {
1710 .suspend = gfar_suspend,
1711 .resume = gfar_resume,
1712 .freeze = gfar_suspend,
1713 .thaw = gfar_resume,
1714 .restore = gfar_restore,
1715};
1716
1717#define GFAR_PM_OPS (&gfar_pm_ops)
1718
Scott Woodd87eb122008-07-11 18:04:45 -05001719#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001720
1721#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001722
Scott Woodd87eb122008-07-11 18:04:45 -05001723#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001725/* Reads the controller's registers to determine what interface
1726 * connects it to the PHY.
1727 */
1728static phy_interface_t gfar_get_interface(struct net_device *dev)
1729{
1730 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001731 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001732 u32 ecntrl;
1733
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001734 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001735
1736 if (ecntrl & ECNTRL_SGMII_MODE)
1737 return PHY_INTERFACE_MODE_SGMII;
1738
1739 if (ecntrl & ECNTRL_TBI_MODE) {
1740 if (ecntrl & ECNTRL_REDUCED_MODE)
1741 return PHY_INTERFACE_MODE_RTBI;
1742 else
1743 return PHY_INTERFACE_MODE_TBI;
1744 }
1745
1746 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001747 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001748 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001749 } else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001751 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001752
Jan Ceuleers0977f812012-06-05 03:42:12 +00001753 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001754 * be set by the device tree or platform code.
1755 */
1756 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1757 return PHY_INTERFACE_MODE_RGMII_ID;
1758
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001759 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001760 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001761 }
1762
Andy Flemingb31a1d82008-12-16 15:29:15 -08001763 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001764 return PHY_INTERFACE_MODE_GMII;
1765
1766 return PHY_INTERFACE_MODE_MII;
1767}
1768
1769
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001770/* Initializes driver's PHY state, and attaches to the PHY.
1771 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 */
1773static int init_phy(struct net_device *dev)
1774{
1775 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001776 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001777 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001778 GFAR_SUPPORTED_GBIT : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001779 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780
1781 priv->oldlink = 0;
1782 priv->oldspeed = 0;
1783 priv->oldduplex = -1;
1784
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001785 interface = gfar_get_interface(dev);
1786
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001787 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1788 interface);
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001789 if (!priv->phydev) {
1790 dev_err(&dev->dev, "could not attach to PHY\n");
1791 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001792 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793
Kapil Junejad3c12872007-05-11 18:25:11 -05001794 if (interface == PHY_INTERFACE_MODE_SGMII)
1795 gfar_configure_serdes(dev);
1796
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001797 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001798 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1799 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001801 /* Add support for flow control, but don't advertise it by default */
1802 priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1803
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805}
1806
Jan Ceuleers0977f812012-06-05 03:42:12 +00001807/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001808 * SERDES lynx PHY on the chip. We communicate with this PHY
1809 * through the MDIO bus on each controller, treating it as a
1810 * "normal" PHY at the address found in the TBIPA register. We assume
1811 * that the TBIPA register is valid. Either the MDIO bus code will set
1812 * it to a value that doesn't conflict with other PHYs on the bus, or the
1813 * value doesn't matter, as there are no other PHYs on the bus.
1814 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001815static void gfar_configure_serdes(struct net_device *dev)
1816{
1817 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001818 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001819
Grant Likelyfe192a42009-04-25 12:53:12 +00001820 if (!priv->tbi_node) {
1821 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1822 "device tree specify a tbi-handle\n");
1823 return;
1824 }
1825
1826 tbiphy = of_phy_find_device(priv->tbi_node);
1827 if (!tbiphy) {
1828 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001829 return;
1830 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001831
Jan Ceuleers0977f812012-06-05 03:42:12 +00001832 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001833 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1834 * everything for us? Resetting it takes the link down and requires
1835 * several seconds for it to come back.
1836 */
Russell King38737e42015-09-24 20:36:28 +01001837 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1838 put_device(&tbiphy->dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -08001839 return;
Russell King38737e42015-09-24 20:36:28 +01001840 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001841
Paul Gortmakerd0313582008-04-17 00:08:10 -04001842 /* Single clk mode, MII mode off (for SerDes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001843 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001844
Grant Likelyfe192a42009-04-25 12:53:12 +00001845 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001846 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1847 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001848
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001849 phy_write(tbiphy, MII_BMCR,
1850 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1851 BMCR_SPEED1000);
Russell King04d53b22015-09-24 20:36:18 +01001852
1853 put_device(&tbiphy->dev);
Kapil Junejad3c12872007-05-11 18:25:11 -05001854}
1855
Anton Vorontsov511d9342010-06-30 06:39:15 +00001856static int __gfar_is_rx_idle(struct gfar_private *priv)
1857{
1858 u32 res;
1859
Jan Ceuleers0977f812012-06-05 03:42:12 +00001860 /* Normally the TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001861 * actually wait for the IEVENT_GRSC flag.
1862 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001863 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001864 return 0;
1865
Jan Ceuleers0977f812012-06-05 03:42:12 +00001866 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001867 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1868 * and the Rx can be safely reset.
1869 */
1870 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1871 res &= 0x7f807f80;
1872 if ((res & 0xffff) == (res >> 16))
1873 return 1;
1874
1875 return 0;
1876}
Kumar Gala0bbaf062005-06-20 10:54:21 -05001877
1878/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001879static void gfar_halt_nodisable(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880{
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001881 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 u32 tempval;
Claudiu Manoila4feee82014-10-07 10:44:34 +03001883 unsigned int timeout;
1884 int stopped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001886 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
Claudiu Manoila4feee82014-10-07 10:44:34 +03001888 if (gfar_is_dma_stopped(priv))
1889 return;
1890
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001892 tempval = gfar_read(&regs->dmactrl);
Claudiu Manoila4feee82014-10-07 10:44:34 +03001893 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1894 gfar_write(&regs->dmactrl, tempval);
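	/* GRS/GTS request a graceful Rx/Tx stop; completion is signalled
	 * via IEVENT (GRSC/GTSC), which gfar_is_dma_stopped() polls for.
	 */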
Anton Vorontsov511d9342010-06-30 06:39:15 +00001895
Claudiu Manoila4feee82014-10-07 10:44:34 +03001896retry:
1897 timeout = 1000;
1898 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1899 cpu_relax();
1900 timeout--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 }
Claudiu Manoila4feee82014-10-07 10:44:34 +03001902
1903 if (!timeout)
1904 stopped = gfar_is_dma_stopped(priv);
1905
1906 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1907 !__gfar_is_rx_idle(priv))
1908 goto retry;
Scott Woodd87eb122008-07-11 18:04:45 -05001909}
Scott Woodd87eb122008-07-11 18:04:45 -05001910
1911/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001912void gfar_halt(struct gfar_private *priv)
Scott Woodd87eb122008-07-11 18:04:45 -05001913{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001914 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001915 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001917 /* Disable the Rx/Tx hw queues */
1918 gfar_write(&regs->rqueue, 0);
1919 gfar_write(&regs->tqueue, 0);
Scott Wood2a54adc2008-08-12 15:10:46 -05001920
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001921 mdelay(10);
1922
1923 gfar_halt_nodisable(priv);
1924
1925 /* Disable Rx/Tx DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 tempval = gfar_read(&regs->maccfg1);
1927 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1928 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001929}
1930
1931void stop_gfar(struct net_device *dev)
1932{
1933 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001934
Claudiu Manoil08511332014-02-24 12:13:45 +02001935 netif_tx_stop_all_queues(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001936
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001937 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02001938 set_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001939 smp_mb__after_atomic();
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001940
Claudiu Manoil08511332014-02-24 12:13:45 +02001941 disable_napi(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001942
Claudiu Manoil08511332014-02-24 12:13:45 +02001943 /* disable ints and gracefully shut down Rx/Tx DMA */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001944 gfar_halt(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Claudiu Manoil08511332014-02-24 12:13:45 +02001946 phy_stop(priv->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949}
1950
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001951static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001954 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001955 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001957 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001959 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1960 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001961 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Claudiu Manoila7312d52015-03-13 10:36:28 +02001963 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1964 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001965 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001966 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001967 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001968 txbdp++;
Claudiu Manoila7312d52015-03-13 10:36:28 +02001969 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1970 be16_to_cpu(txbdp->length),
1971 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001973 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001974 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1975 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001977 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001978 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001979}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001981static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1982{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001983 int i;
1984
Claudiu Manoil75354142015-07-13 16:22:06 +03001985 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1986
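	/* Free any skb left over from a frame that was still being
	 * assembled when the queue was torn down.
	 */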
1987 if (rx_queue->skb)
1988 dev_kfree_skb(rx_queue->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001990 for (i = 0; i < rx_queue->rx_ring_size; i++) {
Claudiu Manoil75354142015-07-13 16:22:06 +03001991 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1992
Anton Vorontsove69edd22009-10-12 06:00:30 +00001993 rxbdp->lstatus = 0;
1994 rxbdp->bufPtr = 0;
1995 rxbdp++;
Claudiu Manoil75354142015-07-13 16:22:06 +03001996
1997 if (!rxb->page)
1998 continue;
1999
2000 dma_unmap_single(rx_queue->dev, rxb->dma,
2001 PAGE_SIZE, DMA_FROM_DEVICE);
2002 __free_page(rxb->page);
2003
2004 rxb->page = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002006
2007 kfree(rx_queue->rx_buff);
2008 rx_queue->rx_buff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002009}
Anton Vorontsove69edd22009-10-12 06:00:30 +00002010
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002011/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00002012 * Then free tx_skbuff and rx_skbuff
2013 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002014static void free_skb_resources(struct gfar_private *priv)
2015{
2016 struct gfar_priv_tx_q *tx_queue = NULL;
2017 struct gfar_priv_rx_q *rx_queue = NULL;
2018 int i;
2019
2020 /* Go through all the buffer descriptors and free their data buffers */
2021 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002022 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002023
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002024 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002025 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002026 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002027 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002028 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002029 }
2030
2031 for (i = 0; i < priv->num_rx_queues; i++) {
2032 rx_queue = priv->rx_queue[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002033 if (rx_queue->rx_buff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002034 free_skb_rx_queue(rx_queue);
2035 }
2036
Claudiu Manoil369ec162013-02-14 05:00:02 +00002037 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002038 sizeof(struct txbd8) * priv->total_tx_ring_size +
2039 sizeof(struct rxbd8) * priv->total_rx_ring_size,
2040 priv->tx_queue[0]->tx_bd_base,
2041 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042}
2043
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002044void gfar_start(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002045{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002046 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002047 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002048 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002049
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002050 /* Enable Rx/Tx hw queues */
2051 gfar_write(&regs->rqueue, priv->rqueue);
2052 gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002053
2054 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002055 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002056 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002057 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002058
Kumar Gala0bbaf062005-06-20 10:54:21 -05002059 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002060 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002061 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002062 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002063
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002064 for (i = 0; i < priv->num_grps; i++) {
2065 regs = priv->gfargrp[i].regs;
2066 /* Clear THLT/RHLT, so that the DMA starts polling now */
2067 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2068 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002069 }
Dai Haruki12dea572008-12-16 15:30:20 -08002070
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002071 /* Enable Rx/Tx DMA */
2072 tempval = gfar_read(&regs->maccfg1);
2073 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2074 gfar_write(&regs->maccfg1, tempval);
2075
Claudiu Manoilefeddce2014-02-17 12:53:17 +02002076 gfar_ints_enable(priv);
2077
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002078 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002079}
2080
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002081static void free_grp_irqs(struct gfar_priv_grp *grp)
2082{
2083 free_irq(gfar_irq(grp, TX)->irq, grp);
2084 free_irq(gfar_irq(grp, RX)->irq, grp);
2085 free_irq(gfar_irq(grp, ER)->irq, grp);
2086}
2087
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002088static int register_grp_irqs(struct gfar_priv_grp *grp)
2089{
2090 struct gfar_private *priv = grp->priv;
2091 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00002092 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00002095 * them. Otherwise, register only for the one interrupt
2096 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002097 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002098 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00002099 * Transmit, and Receive
2100 */
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002101 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002102 gfar_irq(grp, ER)->name, grp);
2103 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002104 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002105 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002106
Julia Lawall2145f1a2010-08-05 10:26:20 +00002107 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002109 enable_irq_wake(gfar_irq(grp, ER)->irq);
2110
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002111 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2112 gfar_irq(grp, TX)->name, grp);
2113 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002114 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002115 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 goto tx_irq_fail;
2117 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002118 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2119 gfar_irq(grp, RX)->name, grp);
2120 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002121 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002122 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 goto rx_irq_fail;
2124 }
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002125 enable_irq_wake(gfar_irq(grp, RX)->irq);
2126
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 } else {
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002128 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002129 gfar_irq(grp, TX)->name, grp);
2130 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002131 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002132 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 goto err_irq_fail;
2134 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002135 enable_irq_wake(gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 }
2137
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002138 return 0;
2139
2140rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002141 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002142tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002143 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002144err_irq_fail:
2145 return err;
2147}
2148
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002149static void gfar_free_irq(struct gfar_private *priv)
2150{
2151 int i;
2152
2153 /* Free the IRQs */
2154 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2155 for (i = 0; i < priv->num_grps; i++)
2156 free_grp_irqs(&priv->gfargrp[i]);
2157 } else {
2158 for (i = 0; i < priv->num_grps; i++)
2159 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2160 &priv->gfargrp[i]);
2161 }
2162}
2163
2164static int gfar_request_irq(struct gfar_private *priv)
2165{
2166 int err, i, j;
2167
2168 for (i = 0; i < priv->num_grps; i++) {
2169 err = register_grp_irqs(&priv->gfargrp[i]);
2170 if (err) {
2171 for (j = 0; j < i; j++)
2172 free_grp_irqs(&priv->gfargrp[j]);
2173 return err;
2174 }
2175 }
2176
2177 return 0;
2178}
2179
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002180/* Bring the controller up and running */
2181int startup_gfar(struct net_device *ndev)
2182{
2183 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002184 int err;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002185
Claudiu Manoila328ac92014-02-24 12:13:42 +02002186 gfar_mac_reset(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002187
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002188 err = gfar_alloc_skb_resources(ndev);
2189 if (err)
2190 return err;
2191
Claudiu Manoila328ac92014-02-24 12:13:42 +02002192 gfar_init_tx_rx_base(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002193
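	/* These barriers pair with the ones around set_bit(GFAR_DOWN)
	 * in stop_gfar().
	 */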
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002194 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002195 clear_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002196 smp_mb__after_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002197
2198 /* Start Rx/Tx DMA and enable the interrupts */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002199 gfar_start(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
Claudiu Manoil2a4eebf2015-08-13 16:50:37 +03002201 /* force link state update after mac reset */
2202 priv->oldlink = 0;
2203 priv->oldspeed = 0;
2204 priv->oldduplex = -1;
2205
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00002206 phy_start(priv->phydev);
2207
Claudiu Manoil08511332014-02-24 12:13:45 +02002208 enable_napi(priv);
2209
2210 netif_tx_wake_all_queues(ndev);
2211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
2214
Jan Ceuleers0977f812012-06-05 03:42:12 +00002215/* Called when something needs to use the ethernet device
2216 * Returns 0 for success.
2217 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218static int gfar_enet_open(struct net_device *dev)
2219{
Li Yang94e8cc32007-10-12 21:53:51 +08002220 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 int err;
2222
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 err = init_phy(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002224 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 return err;
2226
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002227 err = gfar_request_irq(priv);
2228 if (err)
2229 return err;
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 err = startup_gfar(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002232 if (err)
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04002233 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
2235 return err;
2236}
2237
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002238static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002239{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002240 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002241
2242 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002243
Kumar Gala0bbaf062005-06-20 10:54:21 -05002244 return fcb;
2245}
2246
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002247static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002248 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002249{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002250 /* If we're here, it's an IP packet with a TCP or UDP
2251 * payload. We set it up for hardware checksum generation,
2252 * using a pseudo-header we provide
2253 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002254 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002255
Jan Ceuleers0977f812012-06-05 03:42:12 +00002256 /* Tell the controller what the protocol is
2257 * and provide the already-calculated phcs
2258 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002259 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002260 flags |= TXFCB_UDP;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002261 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002262 } else
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002263 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002264
2265 /* l3os is the distance between the start of the
2266 * frame (skb->data) and the start of the IP hdr.
2267 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002268 * l3 hdr and the l4 hdr
2269 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002270 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002271 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002272
Andy Fleming7f7f5312005-11-11 12:38:59 -06002273 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002274}
2275
Andy Fleming7f7f5312005-11-11 12:38:59 -06002276 inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002277{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002278 fcb->flags |= TXFCB_VLN;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002279 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
Kumar Gala0bbaf062005-06-20 10:54:21 -05002280}
2281
Dai Haruki4669bc92008-12-17 16:51:04 -08002282static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002283 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002284{
2285 struct txbd8 *new_bd = bdp + stride;
2286
2287 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2288}
2289
2290static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002291 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002292{
2293 return skip_txbd(bdp, 1, base, ring_size);
2294}
2295
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002296/* eTSEC12: csum generation not supported for some fcb offsets */
2297static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2298 unsigned long fcb_addr)
2299{
2300 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2301 (fcb_addr % 0x20) > 0x18);
2302}
2303
2304 /* eTSEC76: csum generation for frames larger than 2500 bytes may
2305 * cause excess delays before start of transmission
2306 */
2307static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2308 unsigned int len)
2309{
2310 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2311 (len > 2500));
2312}
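
/* When either erratum check fires, callers are expected to fall back
 * to software checksumming for the frame (e.g. via skb_checksum_help()).
 */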
2313
Jan Ceuleers0977f812012-06-05 03:42:12 +00002314/* This is called by the kernel when a frame is ready for transmission.
2315 * It is pointed to by the dev->hard_start_xmit function pointer
2316 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2318{
2319 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002320 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002321 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002322 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002323 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002324 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002325 u32 lstatus;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002326 int i, rq = 0;
2327 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002328 u32 bufaddr;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002329 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002330
2331 rq = skb->queue_mapping;
2332 tx_queue = priv->tx_queue[rq];
2333 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002334 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002335 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002336
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002337 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002338 do_vlan = skb_vlan_tag_present(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002339 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2340 priv->hwts_tx_en;
2341
2342 if (do_csum || do_vlan)
2343 fcb_len = GMAC_FCB_LEN;
2344
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002345 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002346 if (unlikely(do_tstamp))
2347 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002348
Li Yang5b28bea2009-03-27 15:54:30 -07002349 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002350 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002351 struct sk_buff *skb_new;
2352
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002353 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002354 if (!skb_new) {
2355 dev->stats.tx_errors++;
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002356 dev_kfree_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002357 return NETDEV_TX_OK;
2358 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002359
Eric Dumazet313b0372012-07-05 11:45:13 +00002360 if (skb->sk)
2361 skb_set_owner_w(skb_new, skb->sk);
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002362 dev_consume_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002363 skb = skb_new;
2364 }
2365
Dai Haruki4669bc92008-12-17 16:51:04 -08002366 /* total number of fragments in the SKB */
2367 nr_frags = skb_shinfo(skb)->nr_frags;
2368
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002369 /* calculate the required number of TxBDs for this skb */
2370 if (unlikely(do_tstamp))
2371 nr_txbds = nr_frags + 2;
2372 else
2373 nr_txbds = nr_frags + 1;
2374
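        /* Worked example (illustrative): an skb whose data spans the
         * linear buffer plus 3 page fragments needs nr_frags + 1 = 4
         * descriptors; with hardware time stamping enabled one extra BD
         * is chained in, for a total of 5.
         */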
        /* check if there is space to queue this packet */
        if (nr_txbds > tx_queue->num_txbdfree) {
                /* no space, stop the queue */
                netif_tx_stop_queue(txq);
                dev->stats.tx_fifo_errors++;
                return NETDEV_TX_BUSY;
        }

        /* Update transmit stats */
        bytes_sent = skb->len;
        tx_queue->stats.tx_bytes += bytes_sent;
        /* keep Tx bytes on wire for BQL accounting */
        GFAR_CB(skb)->bytes_sent = bytes_sent;
        tx_queue->stats.tx_packets++;

        txbdp = txbdp_start = tx_queue->cur_tx;
        lstatus = be32_to_cpu(txbdp->lstatus);

        /* Time stamp insertion requires one additional TxBD */
        if (unlikely(do_tstamp))
                txbdp_tstamp = txbdp = next_txbd(txbdp, base,
                                                 tx_queue->tx_ring_size);

        if (nr_frags == 0) {
                if (unlikely(do_tstamp)) {
                        u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

                        lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
                        txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
                } else {
                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
                }
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
                        unsigned int frag_len;
                        /* Point at the next BD, wrapping as needed */
                        txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

                        frag_len = skb_shinfo(skb)->frags[i].size;

                        lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
                                  BD_LFLAG(TXBD_READY);

                        /* Handle the last BD specially */
                        if (i == nr_frags - 1)
                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

                        bufaddr = skb_frag_dma_map(priv->dev,
                                                   &skb_shinfo(skb)->frags[i],
                                                   0,
                                                   frag_len,
                                                   DMA_TO_DEVICE);
                        if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
                                goto dma_map_err;

                        /* set the TxBD length and buffer pointer */
                        txbdp->bufPtr = cpu_to_be32(bufaddr);
                        txbdp->lstatus = cpu_to_be32(lstatus);
                }

                lstatus = be32_to_cpu(txbdp_start->lstatus);
        }

        /* Add TxPAL between FCB and frame if required */
        if (unlikely(do_tstamp)) {
                skb_push(skb, GMAC_TXPAL_LEN);
                memset(skb->data, 0, GMAC_TXPAL_LEN);
        }

        /* Add TxFCB if required */
        if (fcb_len) {
                fcb = gfar_add_fcb(skb);
                lstatus |= BD_LFLAG(TXBD_TOE);
        }

        /* Set up checksumming */
        if (do_csum) {
                gfar_tx_checksum(skb, fcb, fcb_len);

                if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
                    unlikely(gfar_csum_errata_76(priv, skb->len))) {
                        __skb_pull(skb, GMAC_FCB_LEN);
                        skb_checksum_help(skb);
                        if (do_vlan || do_tstamp) {
                                /* put back a new fcb for vlan/tstamp TOE */
                                fcb = gfar_add_fcb(skb);
                        } else {
                                /* Tx TOE not used */
                                lstatus &= ~(BD_LFLAG(TXBD_TOE));
                                fcb = NULL;
                        }
                }
        }

        if (do_vlan)
                gfar_tx_vlan(skb, fcb);

        /* Setup tx hardware time stamping if requested */
        if (unlikely(do_tstamp)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                fcb->ptp = 1;
        }

        bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
                                 DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
                goto dma_map_err;

        txbdp_start->bufPtr = cpu_to_be32(bufaddr);

        /* If time stamping is requested one additional TxBD must be set up. The
         * first TxBD points to the FCB and must have a data length of
         * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
         * the full frame length.
         */
        if (unlikely(do_tstamp)) {
                u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

                bufaddr = be32_to_cpu(txbdp_start->bufPtr);
                bufaddr += fcb_len;
                lstatus_ts |= BD_LFLAG(TXBD_READY) |
                              (skb_headlen(skb) - fcb_len);

                txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
                txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
        } else {
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
        }

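        /* Illustration: for a time-stamped frame, txbdp_start describes only
         * the FCB (GMAC_FCB_LEN bytes) and txbdp_tstamp the remaining
         * skb_headlen(skb) - fcb_len bytes of real frame data; the hardware
         * writes the transmit time stamp back into the TxPAL padding, which
         * gfar_clean_tx_ring() later reads out of skb->data.
         */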
        netdev_tx_sent_queue(txq, bytes_sent);

        gfar_wmb();

        txbdp_start->lstatus = cpu_to_be32(lstatus);

        gfar_wmb(); /* force lstatus write before tx_skbuff */

        tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

        /* Update the current skb pointer to the next entry we will use
         * (wrapping if necessary)
         */
        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
                              TX_RING_MOD_MASK(tx_queue->tx_ring_size);

        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

        /* We can work in parallel with gfar_clean_tx_ring(), except
         * when modifying num_txbdfree. Note that we didn't grab the lock
         * when we were reading the num_txbdfree and checking for available
         * space, that's because outside of this function it can only grow.
         */
        spin_lock_bh(&tx_queue->txlock);
        /* reduce TxBD free count */
        tx_queue->num_txbdfree -= (nr_txbds);
        spin_unlock_bh(&tx_queue->txlock);

        /* If the next BD still needs to be cleaned up, then the bds
         * are full.  We need to tell the kernel to stop sending us stuff.
         */
        if (!tx_queue->num_txbdfree) {
                netif_tx_stop_queue(txq);

                dev->stats.tx_fifo_errors++;
        }

        /* Tell the DMA to go go go */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

        return NETDEV_TX_OK;

dma_map_err:
        txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
        if (do_tstamp)
                txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
        for (i = 0; i < nr_frags; i++) {
                lstatus = be32_to_cpu(txbdp->lstatus);
                if (!(lstatus & BD_LFLAG(TXBD_READY)))
                        break;

                lstatus &= ~BD_LFLAG(TXBD_READY);
                txbdp->lstatus = cpu_to_be32(lstatus);
                bufaddr = be32_to_cpu(txbdp->bufPtr);
                dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
                               DMA_TO_DEVICE);
                txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
        }
        gfar_wmb();
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        gfar_free_irq(priv);

        return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        struct gfar_private *priv = netdev_priv(dev);
        int frame_size = new_mtu + ETH_HLEN;

        if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
                netif_err(priv, drv, dev, "Invalid MTU setting\n");
                return -EINVAL;
        }

        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
                cpu_relax();

        if (dev->flags & IFF_UP)
                stop_gfar(dev);

        dev->mtu = new_mtu;

        if (dev->flags & IFF_UP)
                startup_gfar(dev);

        clear_bit_unlock(GFAR_RESETTING, &priv->state);

        return 0;
}

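/* Example (illustrative): the standard MTU of 1500 yields a frame_size of
 * 1500 + ETH_HLEN (14) = 1514 bytes, well inside the 64..GFAR_JUMBO_FRAME_SIZE
 * window checked above; an MTU below 50 would be rejected as a runt.
 */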
void reset_gfar(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);

        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
                cpu_relax();

        stop_gfar(ndev);
        startup_gfar(ndev);

        clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
        struct gfar_private *priv = container_of(work, struct gfar_private,
                                                 reset_task);
        reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        dev->stats.tx_errors++;
        schedule_work(&priv->reset_task);
}

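/* Note: gfar_timeout() is invoked from the networking core's Tx watchdog
 * timer, a context that may not sleep, so the actual reset (stop_gfar() /
 * startup_gfar(), which can sleep) is deferred to the reset_task workqueue
 * handled by gfar_reset_task() above.
 */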
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
        struct net_device *dev = tx_queue->dev;
        struct netdev_queue *txq;
        struct gfar_private *priv = netdev_priv(dev);
        struct txbd8 *bdp, *next = NULL;
        struct txbd8 *lbdp = NULL;
        struct txbd8 *base = tx_queue->tx_bd_base;
        struct sk_buff *skb;
        int skb_dirtytx;
        int tx_ring_size = tx_queue->tx_ring_size;
        int frags = 0, nr_txbds = 0;
        int i;
        int howmany = 0;
        int tqi = tx_queue->qindex;
        unsigned int bytes_sent = 0;
        u32 lstatus;
        size_t buflen;

        txq = netdev_get_tx_queue(dev, tqi);
        bdp = tx_queue->dirty_tx;
        skb_dirtytx = tx_queue->skb_dirtytx;

        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {

                frags = skb_shinfo(skb)->nr_frags;

                /* When time stamping, one additional TxBD must be freed.
                 * Also, we need to dma_unmap_single() the TxPAL.
                 */
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                        nr_txbds = frags + 2;
                else
                        nr_txbds = frags + 1;

                lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

                lstatus = be32_to_cpu(lbdp->lstatus);

                /* Only clean completed frames */
                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
                    (lstatus & BD_LENGTH_MASK))
                        break;

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        next = next_txbd(bdp, base, tx_ring_size);
                        buflen = be16_to_cpu(next->length) +
                                 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
                } else
                        buflen = be16_to_cpu(bdp->length);

                dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
                                 buflen, DMA_TO_DEVICE);

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        struct skb_shared_hwtstamps shhwtstamps;
                        u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
                                          ~0x7UL);

                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
                        skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
                        skb_tstamp_tx(skb, &shhwtstamps);
                        gfar_clear_txbd_status(bdp);
                        bdp = next;
                }

                gfar_clear_txbd_status(bdp);
                bdp = next_txbd(bdp, base, tx_ring_size);

                for (i = 0; i < frags; i++) {
                        dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
                                       be16_to_cpu(bdp->length),
                                       DMA_TO_DEVICE);
                        gfar_clear_txbd_status(bdp);
                        bdp = next_txbd(bdp, base, tx_ring_size);
                }

                bytes_sent += GFAR_CB(skb)->bytes_sent;

                dev_kfree_skb_any(skb);

                tx_queue->tx_skbuff[skb_dirtytx] = NULL;

                skb_dirtytx = (skb_dirtytx + 1) &
                              TX_RING_MOD_MASK(tx_ring_size);

                howmany++;
                spin_lock(&tx_queue->txlock);
                tx_queue->num_txbdfree += nr_txbds;
                spin_unlock(&tx_queue->txlock);
        }

        /* If we freed a buffer, we can restart transmission, if necessary */
        if (tx_queue->num_txbdfree &&
            netif_tx_queue_stopped(txq) &&
            !(test_bit(GFAR_DOWN, &priv->state)))
                netif_wake_subqueue(priv->ndev, tqi);

        /* Update dirty indicators */
        tx_queue->skb_dirtytx = skb_dirtytx;
        tx_queue->dirty_tx = bdp;

        netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

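/* Note on the BQL accounting: netdev_tx_completed_queue() above pairs with
 * the netdev_tx_sent_queue() call in gfar_start_xmit(); both sides use the
 * byte count stashed in GFAR_CB(skb)->bytes_sent, so completions report the
 * same value the enqueue side recorded even though skb->len changes later
 * when the FCB/TxPAL headers are pushed onto the frame.
 */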
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
        struct page *page;
        dma_addr_t addr;

        page = dev_alloc_page();
        if (unlikely(!page))
                return false;

        addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rxq->dev, addr))) {
                __free_page(page);

                return false;
        }

        rxb->dma = addr;
        rxb->page = page;
        rxb->page_offset = 0;

        return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
        struct gfar_private *priv = netdev_priv(rx_queue->ndev);
        struct gfar_extra_stats *estats = &priv->extra_stats;

        netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
        atomic64_inc(&estats->rx_alloc_err);
}

static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
                                int alloc_cnt)
{
        struct rxbd8 *bdp;
        struct gfar_rx_buff *rxb;
        int i;

        i = rx_queue->next_to_use;
        bdp = &rx_queue->rx_bd_base[i];
        rxb = &rx_queue->rx_buff[i];

        while (alloc_cnt--) {
                /* try reuse page */
                if (unlikely(!rxb->page)) {
                        if (unlikely(!gfar_new_page(rx_queue, rxb))) {
                                gfar_rx_alloc_err(rx_queue);
                                break;
                        }
                }

                /* Setup the new RxBD */
                gfar_init_rxbdp(rx_queue, bdp,
                                rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

                /* Update to the next pointer */
                bdp++;
                rxb++;

                if (unlikely(++i == rx_queue->rx_ring_size)) {
                        i = 0;
                        bdp = rx_queue->rx_bd_base;
                        rxb = rx_queue->rx_buff;
                }
        }

        rx_queue->next_to_use = i;
        rx_queue->next_to_alloc = i;
}

static void count_errors(u32 lstatus, struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* If the packet was truncated, none of the other errors matter */
        if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
                stats->rx_length_errors++;

                atomic64_inc(&estats->rx_trunc);

                return;
        }
        /* Count the errors, if there were any */
        if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;

                if (lstatus & BD_LFLAG(RXBD_LARGE))
                        atomic64_inc(&estats->rx_large);
                else
                        atomic64_inc(&estats->rx_short);
        }
        if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
                stats->rx_frame_errors++;
                atomic64_inc(&estats->rx_nonoctet);
        }
        if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
                atomic64_inc(&estats->rx_crcerr);
                stats->rx_crc_errors++;
        }
        if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
                atomic64_inc(&estats->rx_overrun);
                stats->rx_over_errors++;
        }
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
        struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
        unsigned long flags;
        u32 imask, ievent;

        ievent = gfar_read(&grp->regs->ievent);

        if (unlikely(ievent & IEVENT_FGPI)) {
                gfar_write(&grp->regs->ievent, IEVENT_FGPI);
                return IRQ_HANDLED;
        }

        if (likely(napi_schedule_prep(&grp->napi_rx))) {
                spin_lock_irqsave(&grp->grplock, flags);
                imask = gfar_read(&grp->regs->imask);
                imask &= IMASK_RX_DISABLED;
                gfar_write(&grp->regs->imask, imask);
                spin_unlock_irqrestore(&grp->grplock, flags);
                __napi_schedule(&grp->napi_rx);
        } else {
                /* Clear IEVENT, so interrupts aren't called again
                 * because of the packets that have already arrived.
                 */
                gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
        }

        return IRQ_HANDLED;
}

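/* The scheme above is the usual NAPI handshake: the hard IRQ masks further
 * Rx interrupts in IMASK and schedules the poll; gfar_poll_rx() and
 * gfar_poll_rx_sq() below restore IMASK_RX_DEFAULT only once the ring has
 * been drained under budget, keeping interrupt load bounded under flood.
 */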
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
        struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
        unsigned long flags;
        u32 imask;

        if (likely(napi_schedule_prep(&grp->napi_tx))) {
                spin_lock_irqsave(&grp->grplock, flags);
                imask = gfar_read(&grp->regs->imask);
                imask &= IMASK_TX_DISABLED;
                gfar_write(&grp->regs->imask, imask);
                spin_unlock_irqrestore(&grp->grplock, flags);
                __napi_schedule(&grp->napi_tx);
        } else {
                /* Clear IEVENT, so interrupts aren't called again
                 * because of the packets that have already arrived.
                 */
                gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
        }

        return IRQ_HANDLED;
}

static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
                             struct sk_buff *skb, bool first)
{
        unsigned int size = lstatus & BD_LENGTH_MASK;
        struct page *page = rxb->page;

        /* Remove the FCS from the packet length */
        if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
                size -= ETH_FCS_LEN;

        if (likely(first))
                skb_put(skb, size);
        else
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                rxb->page_offset + RXBUF_ALIGNMENT,
                                size, GFAR_RXB_TRUESIZE);

        /* try reuse page */
        if (unlikely(page_count(page) != 1))
                return false;

        /* change offset to the other half */
        rxb->page_offset ^= GFAR_RXB_TRUESIZE;

        atomic_inc(&page->_count);

        return true;
}

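/* Illustration of the half-page recycling scheme (assuming 4 KiB pages and
 * a GFAR_RXB_TRUESIZE of 2048): each page holds two receive buffers, and
 * page_offset ^= GFAR_RXB_TRUESIZE simply flips between the two halves.
 * If the stack still holds a reference to the other half (page_count != 1)
 * the page cannot be recycled and a fresh one is mapped instead.
 */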
static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
                               struct gfar_rx_buff *old_rxb)
{
        struct gfar_rx_buff *new_rxb;
        u16 nta = rxq->next_to_alloc;

        new_rxb = &rxq->rx_buff[nta];

        /* find next buf that can reuse a page */
        nta++;
        rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;

        /* copy page reference */
        *new_rxb = *old_rxb;

        /* sync for use by the device */
        dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
                                         old_rxb->page_offset,
                                         GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
}

static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
                                            u32 lstatus, struct sk_buff *skb)
{
        struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
        struct page *page = rxb->page;
        bool first = false;

        if (likely(!skb)) {
                void *buff_addr = page_address(page) + rxb->page_offset;

                skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
                if (unlikely(!skb)) {
                        gfar_rx_alloc_err(rx_queue);
                        return NULL;
                }
                skb_reserve(skb, RXBUF_ALIGNMENT);
                first = true;
        }

        dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
                                      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

        if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
                /* reuse the free half of the page */
                gfar_reuse_rx_page(rx_queue, rxb);
        } else {
                /* page cannot be reused, unmap it */
                dma_unmap_page(rx_queue->dev, rxb->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        }

        /* clear rxb content */
        rxb->page = NULL;

        return skb;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
        /* If valid headers were found, and valid sums
         * were verified, then we tell the kernel that no
         * checksumming is necessary. Otherwise, the skb is
         * left as CHECKSUM_NONE and the stack verifies the
         * checksum in software.
         */
        if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
            (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);
}

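/* A note on the flags test (based on the RXFCB bit layout in gianfar.h):
 * RXFCB_CIP and RXFCB_CTU say the IP header and TCP/UDP checksums were
 * checked, while the error bits covered by RXFCB_CSUM_MASK must be clear,
 * so the equality comparison accepts only "checked and found correct".
 */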
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct rxfcb *fcb = NULL;

        /* fcb is at the beginning if exists */
        fcb = (struct rxfcb *)skb->data;

        /* Remove the FCB from the skb
         * Remove the padded bytes, if there are any
         */
        if (priv->uses_rxfcb)
                skb_pull(skb, GMAC_FCB_LEN);

        /* Get receive timestamp from the skb */
        if (priv->hwts_rx_en) {
                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
                u64 *ns = (u64 *) skb->data;

                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                shhwtstamps->hwtstamp = ns_to_ktime(*ns);
        }

        if (priv->padding)
                skb_pull(skb, priv->padding);

        if (ndev->features & NETIF_F_RXCSUM)
                gfar_rx_checksum(skb, fcb);

        /* Tell the skb what kind of packet this is */
        skb->protocol = eth_type_trans(skb, ndev);

        /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
         * Even if vlan rx accel is disabled, on some chips
         * RXFCB_VLN is pseudo randomly set.
         */
        if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
            be16_to_cpu(fcb->flags) & RXFCB_VLN)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(fcb->vlctl));
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
        struct net_device *ndev = rx_queue->ndev;
        struct gfar_private *priv = netdev_priv(ndev);
        struct rxbd8 *bdp;
        int i, howmany = 0;
        struct sk_buff *skb = rx_queue->skb;
        int cleaned_cnt = gfar_rxbd_unused(rx_queue);
        unsigned int total_bytes = 0, total_pkts = 0;

        /* Get the first full descriptor */
        i = rx_queue->next_to_clean;

        while (rx_work_limit--) {
                u32 lstatus;

                if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
                        gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
                        cleaned_cnt = 0;
                }

                bdp = &rx_queue->rx_bd_base[i];
                lstatus = be32_to_cpu(bdp->lstatus);
                if (lstatus & BD_LFLAG(RXBD_EMPTY))
                        break;

                /* order rx buffer descriptor reads */
                rmb();

                /* fetch next to clean buffer from the ring */
                skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
                if (unlikely(!skb))
                        break;

                cleaned_cnt++;
                howmany++;

                if (unlikely(++i == rx_queue->rx_ring_size))
                        i = 0;

                rx_queue->next_to_clean = i;

                /* fetch next buffer if not the last in frame */
                if (!(lstatus & BD_LFLAG(RXBD_LAST)))
                        continue;

                if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
                        count_errors(lstatus, ndev);

                        /* discard faulty buffer */
                        dev_kfree_skb(skb);
                        skb = NULL;
                        rx_queue->stats.rx_dropped++;
                        continue;
                }

                /* Increment the number of packets */
                total_pkts++;
                total_bytes += skb->len;

                skb_record_rx_queue(skb, rx_queue->qindex);

                gfar_process_frame(ndev, skb);

                /* Send the packet up the stack */
                napi_gro_receive(&rx_queue->grp->napi_rx, skb);

                skb = NULL;
        }

        /* Store incomplete frames for completion */
        rx_queue->skb = skb;

        rx_queue->stats.rx_packets += total_pkts;
        rx_queue->stats.rx_bytes += total_bytes;

        if (cleaned_cnt)
                gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

        /* Update Last Free RxBD pointer for LFC */
        if (unlikely(priv->tx_actual_en)) {
                u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

                gfar_write(rx_queue->rfbptr, bdp_dma);
        }

        return howmany;
}

static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_rx);
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
        int work_done = 0;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_RX_MASK);

        work_done = gfar_clean_rx_ring(rx_queue, budget);

        if (work_done < budget) {
                u32 imask;
                napi_complete(napi);
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);

                spin_lock_irq(&gfargrp->grplock);
                imask = gfar_read(&regs->imask);
                imask |= IMASK_RX_DEFAULT;
                gfar_write(&regs->imask, imask);
                spin_unlock_irq(&gfargrp->grplock);
        }

        return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_tx);
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
        u32 imask;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_TX_MASK);

        /* run Tx cleanup to completion */
        if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
                gfar_clean_tx_ring(tx_queue);

        napi_complete(napi);

        spin_lock_irq(&gfargrp->grplock);
        imask = gfar_read(&regs->imask);
        imask |= IMASK_TX_DEFAULT;
        gfar_write(&regs->imask, imask);
        spin_unlock_irq(&gfargrp->grplock);

        return 0;
}

static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_rx);
        struct gfar_private *priv = gfargrp->priv;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int work_done = 0, work_done_per_q = 0;
        int i, budget_per_q = 0;
        unsigned long rstat_rxf;
        int num_act_queues;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_RX_MASK);

        rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

        num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
        if (num_act_queues)
                budget_per_q = budget/num_act_queues;

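        /* Example (illustrative): with the usual NAPI budget of 64 and two
         * queues flagged active in RSTAT, each queue gets a budget of 32;
         * a queue that finishes under its share has its RXF bit acked and
         * drops out of num_act_queues in the loop below.
         */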
        for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                /* skip queue if not active */
                if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
                        continue;

                rx_queue = priv->rx_queue[i];
                work_done_per_q =
                        gfar_clean_rx_ring(rx_queue, budget_per_q);
                work_done += work_done_per_q;

                /* finished processing this queue */
                if (work_done_per_q < budget_per_q) {
                        /* clear active queue hw indication */
                        gfar_write(&regs->rstat,
                                   RSTAT_CLEAR_RXF0 >> i);
                        num_act_queues--;

                        if (!num_act_queues)
                                break;
                }
        }

        if (!num_act_queues) {
                u32 imask;
                napi_complete(napi);

                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);

                spin_lock_irq(&gfargrp->grplock);
                imask = gfar_read(&regs->imask);
                imask |= IMASK_RX_DEFAULT;
                gfar_write(&regs->imask, imask);
                spin_unlock_irq(&gfargrp->grplock);
        }

        return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_tx);
        struct gfar_private *priv = gfargrp->priv;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        int has_tx_work = 0;
        int i;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_TX_MASK);

        for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
                tx_queue = priv->tx_queue[i];
                /* run Tx cleanup to completion */
                if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
                        gfar_clean_tx_ring(tx_queue);
                        has_tx_work = 1;
                }
        }

        if (!has_tx_work) {
                u32 imask;
                napi_complete(napi);

                spin_lock_irq(&gfargrp->grplock);
                imask = gfar_read(&regs->imask);
                imask |= IMASK_TX_DEFAULT;
                gfar_write(&regs->imask, imask);
                spin_unlock_irq(&gfargrp->grplock);
        }

        return 0;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        int i;

        /* If the device has multiple interrupts, run tx/rx */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                for (i = 0; i < priv->num_grps; i++) {
                        struct gfar_priv_grp *grp = &priv->gfargrp[i];

                        disable_irq(gfar_irq(grp, TX)->irq);
                        disable_irq(gfar_irq(grp, RX)->irq);
                        disable_irq(gfar_irq(grp, ER)->irq);
                        gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
                        enable_irq(gfar_irq(grp, ER)->irq);
                        enable_irq(gfar_irq(grp, RX)->irq);
                        enable_irq(gfar_irq(grp, TX)->irq);
                }
        } else {
                for (i = 0; i < priv->num_grps; i++) {
                        struct gfar_priv_grp *grp = &priv->gfargrp[i];

                        disable_irq(gfar_irq(grp, TX)->irq);
                        gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
                        enable_irq(gfar_irq(grp, TX)->irq);
                }
        }
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
        struct gfar_priv_grp *gfargrp = grp_id;

        /* Save ievent for future reference */
        u32 events = gfar_read(&gfargrp->regs->ievent);

        /* Check for reception */
        if (events & IEVENT_RX_MASK)
                gfar_receive(irq, grp_id);

        /* Check for transmit completion */
        if (events & IEVENT_TX_MASK)
                gfar_transmit(irq, grp_id);

        /* Check for errors */
        if (events & IEVENT_ERR_MASK)
                gfar_error(irq, grp_id);

        return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

        if (unlikely(phydev->link != priv->oldlink ||
                     (phydev->link && (phydev->duplex != priv->oldduplex ||
                                       phydev->speed != priv->oldspeed))))
                gfar_update_link_state(priv);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;

        if (dev->flags & IFF_PROMISC) {
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
                gfar_write(&regs->rctrl, tempval);
        } else {
                /* Set RCTRL to not PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~(RCTRL_PROM);
                gfar_write(&regs->rctrl, tempval);
        }

        if (dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
                gfar_write(&regs->igaddr2, 0xffffffff);
                gfar_write(&regs->igaddr3, 0xffffffff);
                gfar_write(&regs->igaddr4, 0xffffffff);
                gfar_write(&regs->igaddr5, 0xffffffff);
                gfar_write(&regs->igaddr6, 0xffffffff);
                gfar_write(&regs->igaddr7, 0xffffffff);
                gfar_write(&regs->gaddr0, 0xffffffff);
                gfar_write(&regs->gaddr1, 0xffffffff);
                gfar_write(&regs->gaddr2, 0xffffffff);
                gfar_write(&regs->gaddr3, 0xffffffff);
                gfar_write(&regs->gaddr4, 0xffffffff);
                gfar_write(&regs->gaddr5, 0xffffffff);
                gfar_write(&regs->gaddr6, 0xffffffff);
                gfar_write(&regs->gaddr7, 0xffffffff);
        } else {
                int em_num;
                int idx;

                /* zero out the hash */
                gfar_write(&regs->igaddr0, 0x0);
                gfar_write(&regs->igaddr1, 0x0);
                gfar_write(&regs->igaddr2, 0x0);
                gfar_write(&regs->igaddr3, 0x0);
                gfar_write(&regs->igaddr4, 0x0);
                gfar_write(&regs->igaddr5, 0x0);
                gfar_write(&regs->igaddr6, 0x0);
                gfar_write(&regs->igaddr7, 0x0);
                gfar_write(&regs->gaddr0, 0x0);
                gfar_write(&regs->gaddr1, 0x0);
                gfar_write(&regs->gaddr2, 0x0);
                gfar_write(&regs->gaddr3, 0x0);
                gfar_write(&regs->gaddr4, 0x0);
                gfar_write(&regs->gaddr5, 0x0);
                gfar_write(&regs->gaddr6, 0x0);
                gfar_write(&regs->gaddr7, 0x0);

                /* If we have extended hash tables, we need to
                 * clear the exact match registers to prepare for
                 * setting them
                 */
                if (priv->extended_hash) {
                        em_num = GFAR_EM_NUM + 1;
                        gfar_clear_exact_match(dev);
                        idx = 1;
                } else {
                        idx = 0;
                        em_num = 0;
                }

                if (netdev_mc_empty(dev))
                        return;

                /* Parse the list, and set the appropriate bits */
                netdev_for_each_mc_addr(ha, dev) {
                        if (idx < em_num) {
                                gfar_set_mac_for_addr(dev, idx, ha->addr);
                                idx++;
                        } else
                                gfar_set_hash_for_addr(dev, ha->addr);
                }
        }
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
        int idx;
        static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
                gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
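/* Worked example (illustrative, non-extended case with hash_width = 8):
 * a CRC result of 0xA3xxxxxx gives whichreg = result >> 29 = 5 and
 * whichbit = (result >> 24) & 0x1f = 3, i.e. bit 3 in IBM numbering
 * (mask 1 << (31 - 3)) of gaddr5 is set for this address.
 */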
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
        u32 result = ether_crc(ETH_ALEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
        u32 value = (1 << (31 - whichbit));

        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);
}
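
/* Worked example, illustrative only and never compiled: assumes
 * hash_width == 8 (the 256-entry case described above) and a
 * hypothetical ether_crc() result of 0xd4000000.
 */
#if 0
static u32 gfar_hash_example(void)
{
        u32 result = 0xd4000000;        /* assumed CRC output */
        int width = 8;                  /* 256-entry table */
        u8 whichbit = (result >> (32 - width)) & 0x1f;  /* 0x14 = 20 */
        u8 whichreg = result >> (32 - width + 5);       /* 6 -> gaddr6 */

        (void)whichreg;
        return 1 << (31 - whichbit);    /* bit 20 (IBM) -> 0x00000800 */
}
#endif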

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        u32 __iomem *macptr = &regs->macstnaddr1;

        macptr += num * 2;

        /* For a station address of 0x12345678ABCD in transmission
         * order (BE), MACnADDR1 is set to 0xCDAB7856 and
         * MACnADDR2 is set to 0x34120000.
         */
        tempval = (addr[5] << 24) | (addr[4] << 16) |
                  (addr[3] << 8) | addr[2];

        gfar_write(macptr, tempval);

        tempval = (addr[1] << 24) | (addr[0] << 16);

        gfar_write(macptr + 1, tempval);
}
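
/* Worked example (illustrative): for addr[] = {0x12, 0x34, 0x56, 0x78,
 * 0xAB, 0xCD}, the first write above is
 * (0xCD << 24) | (0xAB << 16) | (0x78 << 8) | 0x56 = 0xCDAB7856 and the
 * second is (0x34 << 24) | (0x12 << 16) = 0x34120000, matching the
 * register layout described in the comment.
 */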

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
        struct gfar_priv_grp *gfargrp = grp_id;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_private *priv = gfargrp->priv;
        struct net_device *dev = priv->ndev;

        /* Save ievent for future reference */
        u32 events = gfar_read(&regs->ievent);

        /* Clear IEVENT (the error bits are write-1-to-clear) */
        gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

        /* Magic Packet is not an error. */
        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
            (events & IEVENT_MAG))
                events &= ~IEVENT_MAG;

        /* Report the raw event bits before handling them */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                netdev_dbg(dev,
                           "error interrupt (ievent=0x%08x imask=0x%08x)\n",
                           events, gfar_read(&regs->imask));

        /* Update the error counters */
        if (events & IEVENT_TXE) {
                dev->stats.tx_errors++;

                if (events & IEVENT_LC)
                        dev->stats.tx_window_errors++;
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
                        netif_dbg(priv, tx_err, dev,
                                  "TX FIFO underrun, packet dropped\n");
                        dev->stats.tx_dropped++;
                        atomic64_inc(&priv->extra_stats.tx_underrun);

                        schedule_work(&priv->reset_task);
                }
                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
        }
        if (events & IEVENT_BSY) {
                dev->stats.rx_over_errors++;
                atomic64_inc(&priv->extra_stats.rx_bsy);

                netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
                          gfar_read(&regs->rstat));
        }
        if (events & IEVENT_BABR) {
                dev->stats.rx_errors++;
                atomic64_inc(&priv->extra_stats.rx_babr);

                netif_dbg(priv, rx_err, dev, "babbling RX error\n");
        }
        if (events & IEVENT_EBERR) {
                atomic64_inc(&priv->extra_stats.eberr);
                netif_dbg(priv, rx_err, dev, "bus error\n");
        }
        if (events & IEVENT_RXC)
                netif_dbg(priv, rx_status, dev, "control frame\n");

        if (events & IEVENT_BABT) {
                atomic64_inc(&priv->extra_stats.tx_babt);
                netif_dbg(priv, tx_err, dev, "babbling TX error\n");
        }
        return IRQ_HANDLED;
}
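
/* Illustrative walk-through: an ievent of IEVENT_TXE | IEVENT_XFUN
 * (a transmit error caused by a FIFO underrun) increments tx_errors and
 * tx_dropped, bumps the tx_underrun extra-stat, and schedules
 * priv->reset_task to restart the controller.
 */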

static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
        struct phy_device *phydev = priv->phydev;
        u32 val = 0;

        if (!phydev->duplex)
                return val;

        if (!priv->pause_aneg_en) {
                if (priv->tx_pause_en)
                        val |= MACCFG1_TX_FLOW;
                if (priv->rx_pause_en)
                        val |= MACCFG1_RX_FLOW;
        } else {
                u16 lcl_adv, rmt_adv;
                u8 flowctrl;

                /* get link partner capabilities */
                rmt_adv = 0;
                if (phydev->pause)
                        rmt_adv = LPA_PAUSE_CAP;
                if (phydev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;

                lcl_adv = 0;
                if (phydev->advertising & ADVERTISED_Pause)
                        lcl_adv |= ADVERTISE_PAUSE_CAP;
                if (phydev->advertising & ADVERTISED_Asym_Pause)
                        lcl_adv |= ADVERTISE_PAUSE_ASYM;

                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
                if (flowctrl & FLOW_CTRL_TX)
                        val |= MACCFG1_TX_FLOW;
                if (flowctrl & FLOW_CTRL_RX)
                        val |= MACCFG1_RX_FLOW;
        }

        return val;
}
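
/* Resolution example (illustrative): with pause autoneg enabled, if the
 * local side advertises only asymmetric pause (ADVERTISE_PAUSE_ASYM)
 * while the link partner advertises both (LPA_PAUSE_CAP |
 * LPA_PAUSE_ASYM), mii_resolve_flowctrl_fdx() resolves to FLOW_CTRL_TX:
 * we may send pause frames but will ignore received ones, so only
 * MACCFG1_TX_FLOW is set in the returned value.
 */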

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct phy_device *phydev = priv->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;

        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
                return;

        if (phydev->link) {
                u32 tempval1 = gfar_read(&regs->maccfg1);
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);
                /* The Tx flow control state lives in maccfg1, not maccfg2 */
                u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

                if (phydev->duplex != priv->oldduplex) {
                        if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
                        else
                                tempval |= MACCFG2_FULL_DUPLEX;

                        priv->oldduplex = phydev->duplex;
                }

                if (phydev->speed != priv->oldspeed) {
                        switch (phydev->speed) {
                        case 1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

                                ecntrl &= ~(ECNTRL_R100);
                                break;
                        case 100:
                        case 10:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

                                /* Reduced mode distinguishes
                                 * between 10 and 100
                                 */
                                if (phydev->speed == SPEED_100)
                                        ecntrl |= ECNTRL_R100;
                                else
                                        ecntrl &= ~(ECNTRL_R100);
                                break;
                        default:
                                netif_warn(priv, link, priv->ndev,
                                           "Ack! Speed (%d) is not 10/100/1000!\n",
                                           phydev->speed);
                                break;
                        }

                        priv->oldspeed = phydev->speed;
                }

                tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
                tempval1 |= gfar_get_flowctrl_cfg(priv);

                /* Turn last free buffer recording on, so the controller
                 * can tell when the Rx rings are about to fill up
                 */
                if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
                        for (i = 0; i < priv->num_rx_queues; i++) {
                                u32 bdp_dma;

                                rx_queue = priv->rx_queue[i];
                                bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
                                gfar_write(rx_queue->rfbptr, bdp_dma);
                        }

                        priv->tx_actual_en = 1;
                }

                if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
                        priv->tx_actual_en = 0;

                gfar_write(&regs->maccfg1, tempval1);
                gfar_write(&regs->maccfg2, tempval);
                gfar_write(&regs->ecntrl, ecntrl);

                if (!priv->oldlink)
                        priv->oldlink = 1;

        } else if (priv->oldlink) {
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (netif_msg_link(priv))
                phy_print_status(phydev);
}
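
/* Illustrative example: a link renegotiated to 100 Mbps full duplex
 * leaves this function having set MACCFG2_MII and MACCFG2_FULL_DUPLEX
 * in maccfg2 and ECNTRL_R100 in ecntrl; dropping to 10 Mbps keeps the
 * MII interface mode but clears ECNTRL_R100, which is how reduced mode
 * distinguishes 10 from 100.
 */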

static const struct of_device_id gfar_match[] = {
        {
                .type = "network",
                .compatible = "gianfar",
        },
        {
                .compatible = "fsl,etsec2",
        },
        {},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
        .driver = {
                .name = "fsl-gianfar",
                .pm = GFAR_PM_OPS,
                .of_match_table = gfar_match,
        },
        .probe = gfar_probe,
        .remove = gfar_remove,
};

module_platform_driver(gfar_driver);