blob: 957bfc220978479a5ccee32b58ae26d4236fe939 [file] [log] [blame]
Jan Ceuleers0977f812012-06-05 03:42:12 +00001/* drivers/net/ethernet/freescale/gianfar.c
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 *
3 * Gianfar Ethernet Driver
Andy Fleming7f7f5312005-11-11 12:38:59 -06004 * This driver is designed for the non-CPM ethernet controllers
5 * on the 85xx and 83xx family of integrated processors
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 * Based on 8260_io/fcc_enet.c
7 *
8 * Author: Andy Fleming
Kumar Gala4c8d3d92005-11-13 16:06:30 -08009 * Maintainer: Kumar Gala
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +000010 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
Claudiu Manoil20862782014-02-17 12:53:14 +020012 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +000013 * Copyright 2007 MontaVista Software, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -070014 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 * Gianfar: AKA Lambda Draconis, "Dragon"
21 * RA 11 31 24.2
22 * Dec +69 19 52
23 * V 3.84
24 * B-V +1.62
25 *
26 * Theory of operation
Kumar Gala0bbaf062005-06-20 10:54:21 -050027 *
Andy Flemingb31a1d82008-12-16 15:29:15 -080028 * The driver is initialized through of_device. Configuration information
29 * is therefore conveyed through an OF-style device tree.
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 *
31 * The Gianfar Ethernet Controller uses a ring of buffer
32 * descriptors. The beginning is indicated by a register
Kumar Gala0bbaf062005-06-20 10:54:21 -050033 * pointing to the physical address of the start of the ring.
34 * The end is determined by a "wrap" bit being set in the
Linus Torvalds1da177e2005-04-16 15:20:36 -070035 * last descriptor of the ring.
36 *
37 * When a packet is received, the RXF bit in the
Kumar Gala0bbaf062005-06-20 10:54:21 -050038 * IEVENT register is set, triggering an interrupt when the
Linus Torvalds1da177e2005-04-16 15:20:36 -070039 * corresponding bit in the IMASK register is also set (if
40 * interrupt coalescing is active, then the interrupt may not
41 * happen immediately, but will wait until either a set number
Andy Flemingbb40dcb2005-09-23 22:54:21 -040042 * of frames or amount of time have passed). In NAPI, the
Linus Torvalds1da177e2005-04-16 15:20:36 -070043 * interrupt handler will signal there is work to be done, and
Francois Romieu0aa15382008-07-11 00:33:52 +020044 * exit. This method will start at the last known empty
Kumar Gala0bbaf062005-06-20 10:54:21 -050045 * descriptor, and process every subsequent descriptor until there
Linus Torvalds1da177e2005-04-16 15:20:36 -070046 * are none left with data (NAPI will stop after a set number of
47 * packets to give time to other tasks, but will eventually
48 * process all the packets). The data arrives inside a
49 * pre-allocated skb, and so after the skb is passed up to the
50 * stack, a new skb must be allocated, and the address field in
51 * the buffer descriptor must be updated to indicate this new
52 * skb.
53 *
54 * When the kernel requests that a packet be transmitted, the
55 * driver starts where it left off last time, and points the
56 * descriptor at the buffer which was passed in. The driver
57 * then informs the DMA engine that there are packets ready to
58 * be transmitted. Once the controller is finished transmitting
59 * the packet, an interrupt may be triggered (under the same
60 * conditions as for reception, but depending on the TXF bit).
61 * The driver then cleans up the buffer.
62 */
63
Joe Perches59deab22011-06-14 08:57:47 +000064#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65#define DEBUG
66
Linus Torvalds1da177e2005-04-16 15:20:36 -070067#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070068#include <linux/string.h>
69#include <linux/errno.h>
Andy Flemingbb40dcb2005-09-23 22:54:21 -040070#include <linux/unistd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070071#include <linux/slab.h>
72#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070073#include <linux/delay.h>
74#include <linux/netdevice.h>
75#include <linux/etherdevice.h>
76#include <linux/skbuff.h>
Kumar Gala0bbaf062005-06-20 10:54:21 -050077#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070078#include <linux/spinlock.h>
79#include <linux/mm.h>
Rob Herring5af50732013-09-17 14:28:33 -050080#include <linux/of_address.h>
81#include <linux/of_irq.h>
Grant Likelyfe192a42009-04-25 12:53:12 +000082#include <linux/of_mdio.h>
Andy Flemingb31a1d82008-12-16 15:29:15 -080083#include <linux/of_platform.h>
Kumar Gala0bbaf062005-06-20 10:54:21 -050084#include <linux/ip.h>
85#include <linux/tcp.h>
86#include <linux/udp.h>
Kumar Gala9c07b8842006-01-11 11:26:25 -080087#include <linux/in.h>
Manfred Rudigiercc772ab2010-04-08 23:10:03 +000088#include <linux/net_tstamp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070089
90#include <asm/io.h>
Claudiu Manoild6ef0bc2014-10-07 10:44:32 +030091#ifdef CONFIG_PPC
Anton Vorontsov7d350972010-06-30 06:39:12 +000092#include <asm/reg.h>
Claudiu Manoil2969b1f2013-10-09 20:20:41 +030093#include <asm/mpc85xx.h>
Claudiu Manoild6ef0bc2014-10-07 10:44:32 +030094#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#include <asm/irq.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080096#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070097#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070098#include <linux/dma-mapping.h>
99#include <linux/crc32.h>
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400100#include <linux/mii.h>
101#include <linux/phy.h>
Andy Flemingb31a1d82008-12-16 15:29:15 -0800102#include <linux/phy_fixed.h>
103#include <linux/of.h>
David Daney4b6ba8a2010-10-26 15:07:13 -0700104#include <linux/of_net.h>
Claudiu Manoilfd31a952014-10-07 10:44:31 +0300105#include <linux/of_address.h>
106#include <linux/of_irq.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108#include "gianfar.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
Abhimanyu8fcc6032015-10-27 14:17:43 +0530110#define TX_TIMEOUT (5*HZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111
Claudiu Manoil75354142015-07-13 16:22:06 +0300112const char gfar_driver_version[] = "2.0";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114static int gfar_enet_open(struct net_device *dev);
115static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
Sebastian Siewiorab939902008-08-19 21:12:45 +0200116static void gfar_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117static void gfar_timeout(struct net_device *dev);
118static int gfar_close(struct net_device *dev);
Claudiu Manoil76f31e82015-07-13 16:22:03 +0300119static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
120 int alloc_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121static int gfar_set_mac_address(struct net_device *dev);
122static int gfar_change_mtu(struct net_device *dev, int new_mtu);
David Howells7d12e782006-10-05 14:55:46 +0100123static irqreturn_t gfar_error(int irq, void *dev_id);
124static irqreturn_t gfar_transmit(int irq, void *dev_id);
125static irqreturn_t gfar_interrupt(int irq, void *dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126static void adjust_link(struct net_device *dev);
Claudiu Manoil6ce29b02014-04-30 14:27:21 +0300127static noinline void gfar_update_link_state(struct gfar_private *priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128static int init_phy(struct net_device *dev);
Grant Likely74888762011-02-22 21:05:51 -0700129static int gfar_probe(struct platform_device *ofdev);
Grant Likely2dc11582010-08-06 09:25:50 -0600130static int gfar_remove(struct platform_device *ofdev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400131static void free_skb_resources(struct gfar_private *priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132static void gfar_set_multi(struct net_device *dev);
133static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
Kapil Junejad3c12872007-05-11 18:25:11 -0500134static void gfar_configure_serdes(struct net_device *dev);
Claudiu Manoilaeb12c52014-03-07 14:42:45 +0200135static int gfar_poll_rx(struct napi_struct *napi, int budget);
136static int gfar_poll_tx(struct napi_struct *napi, int budget);
137static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
138static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
Vitaly Woolf2d71c22006-11-07 13:27:02 +0300139#ifdef CONFIG_NET_POLL_CONTROLLER
140static void gfar_netpoll(struct net_device *dev);
141#endif
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000142int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
Claudiu Manoilc233cf402013-03-19 07:40:02 +0000143static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
Claudiu Manoilf23223f2015-07-13 16:22:05 +0300144static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
Claudiu Manoilc10650b2014-02-17 12:53:18 +0200145static void gfar_halt_nodisable(struct gfar_private *priv);
Andy Fleming7f7f5312005-11-11 12:38:59 -0600146static void gfar_clear_exact_match(struct net_device *dev);
Joe Perchesb6bc7652010-12-21 02:16:08 -0800147static void gfar_set_mac_for_addr(struct net_device *dev, int num,
148 const u8 *addr);
Andy Fleming26ccfc32009-03-10 12:58:28 +0000149static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151MODULE_AUTHOR("Freescale Semiconductor, Inc");
152MODULE_DESCRIPTION("Gianfar Ethernet Driver");
153MODULE_LICENSE("GPL");
154
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000155static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000156 dma_addr_t buf)
157{
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000158 u32 lstatus;
159
Claudiu Manoila7312d52015-03-13 10:36:28 +0200160 bdp->bufPtr = cpu_to_be32(buf);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000161
162 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000163 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000164 lstatus |= BD_LFLAG(RXBD_WRAP);
165
Claudiu Manoild55398b2014-10-07 10:44:35 +0300166 gfar_wmb();
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000167
Claudiu Manoila7312d52015-03-13 10:36:28 +0200168 bdp->lstatus = cpu_to_be32(lstatus);
Anton Vorontsov8a102fe2009-10-12 06:00:37 +0000169}
170
Claudiu Manoil76f31e82015-07-13 16:22:03 +0300171static void gfar_init_bds(struct net_device *ndev)
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000172{
Anton Vorontsov87283272009-10-12 06:00:39 +0000173 struct gfar_private *priv = netdev_priv(ndev);
Matei Pavaluca45b679c92014-10-27 10:42:44 +0200174 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000175 struct gfar_priv_tx_q *tx_queue = NULL;
176 struct gfar_priv_rx_q *rx_queue = NULL;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000177 struct txbd8 *txbdp;
Kevin Hao03366a332014-12-24 14:05:45 +0800178 u32 __iomem *rfbptr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000179 int i, j;
Anton Vorontsov87283272009-10-12 06:00:39 +0000180
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000181 for (i = 0; i < priv->num_tx_queues; i++) {
182 tx_queue = priv->tx_queue[i];
183 /* Initialize some variables in our dev structure */
184 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
185 tx_queue->dirty_tx = tx_queue->tx_bd_base;
186 tx_queue->cur_tx = tx_queue->tx_bd_base;
187 tx_queue->skb_curtx = 0;
188 tx_queue->skb_dirtytx = 0;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000189
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000190 /* Initialize Transmit Descriptor Ring */
191 txbdp = tx_queue->tx_bd_base;
192 for (j = 0; j < tx_queue->tx_ring_size; j++) {
193 txbdp->lstatus = 0;
194 txbdp->bufPtr = 0;
195 txbdp++;
Anton Vorontsov87283272009-10-12 06:00:39 +0000196 }
197
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000198 /* Set the last descriptor in the ring to indicate wrap */
199 txbdp--;
Claudiu Manoila7312d52015-03-13 10:36:28 +0200200 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
201 TXBD_WRAP);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000202 }
203
Matei Pavaluca45b679c92014-10-27 10:42:44 +0200204 rfbptr = &regs->rfbptr0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000205 for (i = 0; i < priv->num_rx_queues; i++) {
206 rx_queue = priv->rx_queue[i];
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000207
Claudiu Manoil76f31e82015-07-13 16:22:03 +0300208 rx_queue->next_to_clean = 0;
209 rx_queue->next_to_use = 0;
Claudiu Manoil75354142015-07-13 16:22:06 +0300210 rx_queue->next_to_alloc = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000211
Claudiu Manoil76f31e82015-07-13 16:22:03 +0300212 /* make sure next_to_clean != next_to_use after this
213 * by leaving at least 1 unused descriptor
214 */
215 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000216
Matei Pavaluca45b679c92014-10-27 10:42:44 +0200217 rx_queue->rfbptr = rfbptr;
218 rfbptr += 2;
Anton Vorontsov87283272009-10-12 06:00:39 +0000219 }
Anton Vorontsov87283272009-10-12 06:00:39 +0000220}
221
222static int gfar_alloc_skb_resources(struct net_device *ndev)
223{
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000224 void *vaddr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000225 dma_addr_t addr;
Claudiu Manoil75354142015-07-13 16:22:06 +0300226 int i, j;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000227 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil369ec162013-02-14 05:00:02 +0000228 struct device *dev = priv->dev;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000229 struct gfar_priv_tx_q *tx_queue = NULL;
230 struct gfar_priv_rx_q *rx_queue = NULL;
231
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000232 priv->total_tx_ring_size = 0;
233 for (i = 0; i < priv->num_tx_queues; i++)
234 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
235
236 priv->total_rx_ring_size = 0;
237 for (i = 0; i < priv->num_rx_queues; i++)
238 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000239
240 /* Allocate memory for the buffer descriptors */
Anton Vorontsov87283272009-10-12 06:00:39 +0000241 vaddr = dma_alloc_coherent(dev,
Joe Perchesd0320f72013-03-14 13:07:21 +0000242 (priv->total_tx_ring_size *
243 sizeof(struct txbd8)) +
244 (priv->total_rx_ring_size *
245 sizeof(struct rxbd8)),
246 &addr, GFP_KERNEL);
247 if (!vaddr)
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000248 return -ENOMEM;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000249
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000250 for (i = 0; i < priv->num_tx_queues; i++) {
251 tx_queue = priv->tx_queue[i];
Joe Perches43d620c2011-06-16 19:08:06 +0000252 tx_queue->tx_bd_base = vaddr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000253 tx_queue->tx_bd_dma_base = addr;
254 tx_queue->dev = ndev;
255 /* enet DMA only understands physical addresses */
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000256 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
257 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000258 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000259
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000260 /* Start the rx descriptor ring where the tx ring leaves off */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000261 for (i = 0; i < priv->num_rx_queues; i++) {
262 rx_queue = priv->rx_queue[i];
Joe Perches43d620c2011-06-16 19:08:06 +0000263 rx_queue->rx_bd_base = vaddr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000264 rx_queue->rx_bd_dma_base = addr;
Claudiu Manoilf23223f2015-07-13 16:22:05 +0300265 rx_queue->ndev = ndev;
Claudiu Manoil75354142015-07-13 16:22:06 +0300266 rx_queue->dev = dev;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000267 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
268 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000269 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000270
271 /* Setup the skbuff rings */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000272 for (i = 0; i < priv->num_tx_queues; i++) {
273 tx_queue = priv->tx_queue[i];
Joe Perches14f8dc42013-02-07 11:46:27 +0000274 tx_queue->tx_skbuff =
275 kmalloc_array(tx_queue->tx_ring_size,
276 sizeof(*tx_queue->tx_skbuff),
277 GFP_KERNEL);
278 if (!tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000279 goto cleanup;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000280
Claudiu Manoil75354142015-07-13 16:22:06 +0300281 for (j = 0; j < tx_queue->tx_ring_size; j++)
282 tx_queue->tx_skbuff[j] = NULL;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000283 }
284
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000285 for (i = 0; i < priv->num_rx_queues; i++) {
286 rx_queue = priv->rx_queue[i];
Claudiu Manoil75354142015-07-13 16:22:06 +0300287 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
288 sizeof(*rx_queue->rx_buff),
289 GFP_KERNEL);
290 if (!rx_queue->rx_buff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000291 goto cleanup;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000292 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000293
Claudiu Manoil76f31e82015-07-13 16:22:03 +0300294 gfar_init_bds(ndev);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000295
296 return 0;
297
298cleanup:
299 free_skb_resources(priv);
300 return -ENOMEM;
301}
302
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000303static void gfar_init_tx_rx_base(struct gfar_private *priv)
304{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000305 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov18294ad2009-11-04 12:53:00 +0000306 u32 __iomem *baddr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000307 int i;
308
309 baddr = &regs->tbase0;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000310 for (i = 0; i < priv->num_tx_queues; i++) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000311 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000312 baddr += 2;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000313 }
314
315 baddr = &regs->rbase0;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000316 for (i = 0; i < priv->num_rx_queues; i++) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000317 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000318 baddr += 2;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000319 }
320}
321
Matei Pavaluca45b679c92014-10-27 10:42:44 +0200322static void gfar_init_rqprm(struct gfar_private *priv)
323{
324 struct gfar __iomem *regs = priv->gfargrp[0].regs;
325 u32 __iomem *baddr;
326 int i;
327
328 baddr = &regs->rqprm0;
329 for (i = 0; i < priv->num_rx_queues; i++) {
330 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
331 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
332 baddr++;
333 }
334}
335
Claudiu Manoil75354142015-07-13 16:22:06 +0300336static void gfar_rx_offload_en(struct gfar_private *priv)
Claudiu Manoil88302642014-02-24 12:13:43 +0200337{
Claudiu Manoil88302642014-02-24 12:13:43 +0200338 /* set this when rx hw offload (TOE) functions are being used */
339 priv->uses_rxfcb = 0;
340
341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
342 priv->uses_rxfcb = 1;
343
Claudiu Manoil15bf1762015-10-23 11:41:59 +0300344 if (priv->hwts_rx_en || priv->rx_filer_enable)
Claudiu Manoil88302642014-02-24 12:13:43 +0200345 priv->uses_rxfcb = 1;
Claudiu Manoil88302642014-02-24 12:13:43 +0200346}
347
Claudiu Manoila328ac92014-02-24 12:13:42 +0200348static void gfar_mac_rx_config(struct gfar_private *priv)
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000349{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000350 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000351 u32 rctrl = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000352
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000353 if (priv->rx_filer_enable) {
Claudiu Manoil15bf1762015-10-23 11:41:59 +0300354 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000355 /* Program the RIR0 reg with the required distribution */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200356 if (priv->poll_mode == GFAR_SQ_POLLING)
357 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
358 else /* GFAR_MQ_POLLING */
359 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
Sandeep Gopalpet1ccb8382009-12-16 01:14:58 +0000360 }
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000361
Claudiu Manoilf5ae6272013-01-23 00:18:36 +0000362 /* Restore PROMISC mode */
Claudiu Manoila328ac92014-02-24 12:13:42 +0200363 if (priv->ndev->flags & IFF_PROMISC)
Claudiu Manoilf5ae6272013-01-23 00:18:36 +0000364 rctrl |= RCTRL_PROM;
365
Claudiu Manoil88302642014-02-24 12:13:43 +0200366 if (priv->ndev->features & NETIF_F_RXCSUM)
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000367 rctrl |= RCTRL_CHECKSUMMING;
368
Claudiu Manoil88302642014-02-24 12:13:43 +0200369 if (priv->extended_hash)
370 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000371
372 if (priv->padding) {
373 rctrl &= ~RCTRL_PAL_MASK;
374 rctrl |= RCTRL_PADDING(priv->padding);
375 }
376
Manfred Rudigier97553f72010-06-11 01:49:05 +0000377 /* Enable HW time stamping if requested from user space */
Claudiu Manoil88302642014-02-24 12:13:43 +0200378 if (priv->hwts_rx_en)
Manfred Rudigier97553f72010-06-11 01:49:05 +0000379 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
380
Claudiu Manoil88302642014-02-24 12:13:43 +0200381 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
Sebastian Pöhnb852b722011-07-26 00:03:13 +0000382 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000383
Matei Pavaluca45b679c92014-10-27 10:42:44 +0200384 /* Clear the LFC bit */
385 gfar_write(&regs->rctrl, rctrl);
386 /* Init flow control threshold values */
387 gfar_init_rqprm(priv);
388 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
389 rctrl |= RCTRL_LFC;
390
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000391 /* Init rctrl based on our settings */
392 gfar_write(&regs->rctrl, rctrl);
Claudiu Manoila328ac92014-02-24 12:13:42 +0200393}
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000394
Claudiu Manoila328ac92014-02-24 12:13:42 +0200395static void gfar_mac_tx_config(struct gfar_private *priv)
396{
397 struct gfar __iomem *regs = priv->gfargrp[0].regs;
398 u32 tctrl = 0;
399
400 if (priv->ndev->features & NETIF_F_IP_CSUM)
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000401 tctrl |= TCTRL_INIT_CSUM;
402
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +0000403 if (priv->prio_sched_en)
404 tctrl |= TCTRL_TXSCHED_PRIO;
405 else {
406 tctrl |= TCTRL_TXSCHED_WRRS;
407 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
408 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
409 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000410
Claudiu Manoil88302642014-02-24 12:13:43 +0200411 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
412 tctrl |= TCTRL_VLINS;
413
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000414 gfar_write(&regs->tctrl, tctrl);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +0000415}
416
Claudiu Manoilf19015b2014-02-24 12:13:46 +0200417static void gfar_configure_coalescing(struct gfar_private *priv,
418 unsigned long tx_mask, unsigned long rx_mask)
419{
420 struct gfar __iomem *regs = priv->gfargrp[0].regs;
421 u32 __iomem *baddr;
422
423 if (priv->mode == MQ_MG_MODE) {
424 int i = 0;
425
426 baddr = &regs->txic0;
427 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
428 gfar_write(baddr + i, 0);
429 if (likely(priv->tx_queue[i]->txcoalescing))
430 gfar_write(baddr + i, priv->tx_queue[i]->txic);
431 }
432
433 baddr = &regs->rxic0;
434 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
435 gfar_write(baddr + i, 0);
436 if (likely(priv->rx_queue[i]->rxcoalescing))
437 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
438 }
439 } else {
440 /* Backward compatible case -- even if we enable
441 * multiple queues, there's only single reg to program
442 */
443 gfar_write(&regs->txic, 0);
444 if (likely(priv->tx_queue[0]->txcoalescing))
445 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
446
447 gfar_write(&regs->rxic, 0);
448 if (unlikely(priv->rx_queue[0]->rxcoalescing))
449 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
450 }
451}
452
453void gfar_configure_coalescing_all(struct gfar_private *priv)
454{
455 gfar_configure_coalescing(priv, 0xFF, 0xFF);
456}
457
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000458static struct net_device_stats *gfar_get_stats(struct net_device *dev)
459{
460 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000461 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
462 unsigned long tx_packets = 0, tx_bytes = 0;
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +0000463 int i;
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000464
465 for (i = 0; i < priv->num_rx_queues; i++) {
466 rx_packets += priv->rx_queue[i]->stats.rx_packets;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000467 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000468 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
469 }
470
471 dev->stats.rx_packets = rx_packets;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000472 dev->stats.rx_bytes = rx_bytes;
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000473 dev->stats.rx_dropped = rx_dropped;
474
475 for (i = 0; i < priv->num_tx_queues; i++) {
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000476 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
477 tx_packets += priv->tx_queue[i]->stats.tx_packets;
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000478 }
479
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000480 dev->stats.tx_bytes = tx_bytes;
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000481 dev->stats.tx_packets = tx_packets;
482
483 return &dev->stats;
484}
485
Claudiu Manoil3d23a052015-05-06 18:07:30 +0300486static int gfar_set_mac_addr(struct net_device *dev, void *p)
487{
488 eth_mac_addr(dev, p);
489
490 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
491
492 return 0;
493}
494
Andy Fleming26ccfc32009-03-10 12:58:28 +0000495static const struct net_device_ops gfar_netdev_ops = {
496 .ndo_open = gfar_enet_open,
497 .ndo_start_xmit = gfar_start_xmit,
498 .ndo_stop = gfar_close,
499 .ndo_change_mtu = gfar_change_mtu,
Michał Mirosław8b3afe92011-04-15 04:50:50 +0000500 .ndo_set_features = gfar_set_features,
Jiri Pirkoafc4b132011-08-16 06:29:01 +0000501 .ndo_set_rx_mode = gfar_set_multi,
Andy Fleming26ccfc32009-03-10 12:58:28 +0000502 .ndo_tx_timeout = gfar_timeout,
503 .ndo_do_ioctl = gfar_ioctl,
Sandeep Gopalpeta7f38042009-12-16 01:15:07 +0000504 .ndo_get_stats = gfar_get_stats,
Claudiu Manoil3d23a052015-05-06 18:07:30 +0300505 .ndo_set_mac_address = gfar_set_mac_addr,
Ben Hutchings240c1022009-07-09 17:54:35 +0000506 .ndo_validate_addr = eth_validate_addr,
Andy Fleming26ccfc32009-03-10 12:58:28 +0000507#ifdef CONFIG_NET_POLL_CONTROLLER
508 .ndo_poll_controller = gfar_netpoll,
509#endif
510};
511
Claudiu Manoilefeddce2014-02-17 12:53:17 +0200512static void gfar_ints_disable(struct gfar_private *priv)
513{
514 int i;
515 for (i = 0; i < priv->num_grps; i++) {
516 struct gfar __iomem *regs = priv->gfargrp[i].regs;
517 /* Clear IEVENT */
518 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
519
520 /* Initialize IMASK */
521 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
522 }
523}
524
525static void gfar_ints_enable(struct gfar_private *priv)
526{
527 int i;
528 for (i = 0; i < priv->num_grps; i++) {
529 struct gfar __iomem *regs = priv->gfargrp[i].regs;
530 /* Unmask the interrupts we look for */
531 gfar_write(&regs->imask, IMASK_DEFAULT);
532 }
533}
534
Claudiu Manoil20862782014-02-17 12:53:14 +0200535static int gfar_alloc_tx_queues(struct gfar_private *priv)
536{
537 int i;
538
539 for (i = 0; i < priv->num_tx_queues; i++) {
540 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
541 GFP_KERNEL);
542 if (!priv->tx_queue[i])
543 return -ENOMEM;
544
545 priv->tx_queue[i]->tx_skbuff = NULL;
546 priv->tx_queue[i]->qindex = i;
547 priv->tx_queue[i]->dev = priv->ndev;
548 spin_lock_init(&(priv->tx_queue[i]->txlock));
549 }
550 return 0;
551}
552
553static int gfar_alloc_rx_queues(struct gfar_private *priv)
554{
555 int i;
556
557 for (i = 0; i < priv->num_rx_queues; i++) {
558 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
559 GFP_KERNEL);
560 if (!priv->rx_queue[i])
561 return -ENOMEM;
562
Claudiu Manoil20862782014-02-17 12:53:14 +0200563 priv->rx_queue[i]->qindex = i;
Claudiu Manoilf23223f2015-07-13 16:22:05 +0300564 priv->rx_queue[i]->ndev = priv->ndev;
Claudiu Manoil20862782014-02-17 12:53:14 +0200565 }
566 return 0;
567}
568
569static void gfar_free_tx_queues(struct gfar_private *priv)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000570{
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +0000571 int i;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000572
573 for (i = 0; i < priv->num_tx_queues; i++)
574 kfree(priv->tx_queue[i]);
575}
576
Claudiu Manoil20862782014-02-17 12:53:14 +0200577static void gfar_free_rx_queues(struct gfar_private *priv)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000578{
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +0000579 int i;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000580
581 for (i = 0; i < priv->num_rx_queues; i++)
582 kfree(priv->rx_queue[i]);
583}
584
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000585static void unmap_group_regs(struct gfar_private *priv)
586{
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +0000587 int i;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000588
589 for (i = 0; i < MAXGROUPS; i++)
590 if (priv->gfargrp[i].regs)
591 iounmap(priv->gfargrp[i].regs);
592}
593
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000594static void free_gfar_dev(struct gfar_private *priv)
595{
596 int i, j;
597
598 for (i = 0; i < priv->num_grps; i++)
599 for (j = 0; j < GFAR_NUM_IRQS; j++) {
600 kfree(priv->gfargrp[i].irqinfo[j]);
601 priv->gfargrp[i].irqinfo[j] = NULL;
602 }
603
604 free_netdev(priv->ndev);
605}
606
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000607static void disable_napi(struct gfar_private *priv)
608{
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +0000609 int i;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000610
Claudiu Manoilaeb12c52014-03-07 14:42:45 +0200611 for (i = 0; i < priv->num_grps; i++) {
612 napi_disable(&priv->gfargrp[i].napi_rx);
613 napi_disable(&priv->gfargrp[i].napi_tx);
614 }
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000615}
616
617static void enable_napi(struct gfar_private *priv)
618{
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +0000619 int i;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000620
Claudiu Manoilaeb12c52014-03-07 14:42:45 +0200621 for (i = 0; i < priv->num_grps; i++) {
622 napi_enable(&priv->gfargrp[i].napi_rx);
623 napi_enable(&priv->gfargrp[i].napi_tx);
624 }
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000625}
626
627static int gfar_parse_group(struct device_node *np,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000628 struct gfar_private *priv, const char *model)
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000629{
Claudiu Manoil5fedcc12013-01-29 03:55:11 +0000630 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000631 int i;
632
Paul Gortmaker7c1e7e92013-02-04 09:49:42 +0000633 for (i = 0; i < GFAR_NUM_IRQS; i++) {
634 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
635 GFP_KERNEL);
636 if (!grp->irqinfo[i])
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000637 return -ENOMEM;
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000638 }
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000639
Claudiu Manoil5fedcc12013-01-29 03:55:11 +0000640 grp->regs = of_iomap(np, 0);
641 if (!grp->regs)
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000642 return -ENOMEM;
643
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000644 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000645
646 /* If we aren't the FEC we have multiple interrupts */
647 if (model && strcasecmp(model, "FEC")) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000648 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
649 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
Mark Brownfea0f662015-11-26 11:59:45 +0000650 if (!gfar_irq(grp, TX)->irq ||
651 !gfar_irq(grp, RX)->irq ||
652 !gfar_irq(grp, ER)->irq)
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000653 return -EINVAL;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000654 }
655
Claudiu Manoil5fedcc12013-01-29 03:55:11 +0000656 grp->priv = priv;
657 spin_lock_init(&grp->grplock);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000658 if (priv->mode == MQ_MG_MODE) {
Jingchang Lu55917642015-03-13 10:52:32 +0200659 u32 rxq_mask, txq_mask;
660 int ret;
661
662 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
663 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
664
665 ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
666 if (!ret) {
667 grp->rx_bit_map = rxq_mask ?
668 rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
669 }
670
671 ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
672 if (!ret) {
673 grp->tx_bit_map = txq_mask ?
674 txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
675 }
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200676
677 if (priv->poll_mode == GFAR_SQ_POLLING) {
678 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
679 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
680 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200681 }
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000682 } else {
Claudiu Manoil5fedcc12013-01-29 03:55:11 +0000683 grp->rx_bit_map = 0xFF;
684 grp->tx_bit_map = 0xFF;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000685 }
Claudiu Manoil20862782014-02-17 12:53:14 +0200686
687 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
688 * right to left, so we need to revert the 8 bits to get the q index
689 */
690 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
691 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
692
693 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
694 * also assign queues to groups
695 */
696 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200697 if (!grp->rx_queue)
698 grp->rx_queue = priv->rx_queue[i];
Claudiu Manoil20862782014-02-17 12:53:14 +0200699 grp->num_rx_queues++;
700 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
701 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
702 priv->rx_queue[i]->grp = grp;
703 }
704
705 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200706 if (!grp->tx_queue)
707 grp->tx_queue = priv->tx_queue[i];
Claudiu Manoil20862782014-02-17 12:53:14 +0200708 grp->num_tx_queues++;
709 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
710 priv->tqueue |= (TQUEUE_EN0 >> i);
711 priv->tx_queue[i]->grp = grp;
712 }
713
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000714 priv->num_grps++;
715
716 return 0;
717}
718
Tobias Waldekranzf50724c2015-03-05 14:48:23 +0100719static int gfar_of_group_count(struct device_node *np)
720{
721 struct device_node *child;
722 int num = 0;
723
724 for_each_available_child_of_node(np, child)
725 if (!of_node_cmp(child->name, "queue-group"))
726 num++;
727
728 return num;
729}
730
Grant Likely2dc11582010-08-06 09:25:50 -0600731static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
Andy Flemingb31a1d82008-12-16 15:29:15 -0800732{
Andy Flemingb31a1d82008-12-16 15:29:15 -0800733 const char *model;
734 const char *ctype;
735 const void *mac_addr;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000736 int err = 0, i;
737 struct net_device *dev = NULL;
738 struct gfar_private *priv = NULL;
Grant Likely61c7a082010-04-13 16:12:29 -0700739 struct device_node *np = ofdev->dev.of_node;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000740 struct device_node *child = NULL;
Jingchang Lu55917642015-03-13 10:52:32 +0200741 u32 stash_len = 0;
742 u32 stash_idx = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000743 unsigned int num_tx_qs, num_rx_qs;
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200744 unsigned short mode, poll_mode;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800745
Kevin Hao4b222ca2015-01-28 20:06:48 +0800746 if (!np)
Andy Flemingb31a1d82008-12-16 15:29:15 -0800747 return -ENODEV;
748
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200749 if (of_device_is_compatible(np, "fsl,etsec2")) {
750 mode = MQ_MG_MODE;
751 poll_mode = GFAR_SQ_POLLING;
752 } else {
753 mode = SQ_SG_MODE;
754 poll_mode = GFAR_SQ_POLLING;
755 }
756
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200757 if (mode == SQ_SG_MODE) {
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200758 num_tx_qs = 1;
759 num_rx_qs = 1;
760 } else { /* MQ_MG_MODE */
Claudiu Manoilc65d7532014-03-21 09:33:17 +0200761 /* get the actual number of supported groups */
Tobias Waldekranzf50724c2015-03-05 14:48:23 +0100762 unsigned int num_grps = gfar_of_group_count(np);
Claudiu Manoilc65d7532014-03-21 09:33:17 +0200763
764 if (num_grps == 0 || num_grps > MAXGROUPS) {
765 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
766 num_grps);
767 pr_err("Cannot do alloc_etherdev, aborting\n");
768 return -EINVAL;
769 }
770
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200771 if (poll_mode == GFAR_SQ_POLLING) {
Claudiu Manoilc65d7532014-03-21 09:33:17 +0200772 num_tx_qs = num_grps; /* one txq per int group */
773 num_rx_qs = num_grps; /* one rxq per int group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200774 } else { /* GFAR_MQ_POLLING */
Jingchang Lu55917642015-03-13 10:52:32 +0200775 u32 tx_queues, rx_queues;
776 int ret;
777
778 /* parse the num of HW tx and rx queues */
779 ret = of_property_read_u32(np, "fsl,num_tx_queues",
780 &tx_queues);
781 num_tx_qs = ret ? 1 : tx_queues;
782
783 ret = of_property_read_u32(np, "fsl,num_rx_queues",
784 &rx_queues);
785 num_rx_qs = ret ? 1 : rx_queues;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +0200786 }
787 }
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000788
789 if (num_tx_qs > MAX_TX_QS) {
Joe Perches59deab22011-06-14 08:57:47 +0000790 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
791 num_tx_qs, MAX_TX_QS);
792 pr_err("Cannot do alloc_etherdev, aborting\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000793 return -EINVAL;
794 }
795
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000796 if (num_rx_qs > MAX_RX_QS) {
Joe Perches59deab22011-06-14 08:57:47 +0000797 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
798 num_rx_qs, MAX_RX_QS);
799 pr_err("Cannot do alloc_etherdev, aborting\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000800 return -EINVAL;
801 }
802
803 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
804 dev = *pdev;
805 if (NULL == dev)
806 return -ENOMEM;
807
808 priv = netdev_priv(dev);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000809 priv->ndev = dev;
810
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200811 priv->mode = mode;
812 priv->poll_mode = poll_mode;
813
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000814 priv->num_tx_queues = num_tx_qs;
Ben Hutchingsfe069122010-09-27 08:27:37 +0000815 netif_set_real_num_rx_queues(dev, num_rx_qs);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000816 priv->num_rx_queues = num_rx_qs;
Claudiu Manoil20862782014-02-17 12:53:14 +0200817
818 err = gfar_alloc_tx_queues(priv);
819 if (err)
820 goto tx_alloc_failed;
821
822 err = gfar_alloc_rx_queues(priv);
823 if (err)
824 goto rx_alloc_failed;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800825
Jingchang Lu55917642015-03-13 10:52:32 +0200826 err = of_property_read_string(np, "model", &model);
827 if (err) {
828 pr_err("Device model property missing, aborting\n");
829 goto rx_alloc_failed;
830 }
831
Jan Ceuleers0977f812012-06-05 03:42:12 +0000832 /* Init Rx queue filer rule set linked list */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700833 INIT_LIST_HEAD(&priv->rx_list.list);
834 priv->rx_list.count = 0;
835 mutex_init(&priv->rx_queue_access);
836
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000837 for (i = 0; i < MAXGROUPS; i++)
838 priv->gfargrp[i].regs = NULL;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800839
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000840 /* Parse and initialize group specific information */
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200841 if (priv->mode == MQ_MG_MODE) {
Tobias Waldekranzf50724c2015-03-05 14:48:23 +0100842 for_each_available_child_of_node(np, child) {
843 if (of_node_cmp(child->name, "queue-group"))
844 continue;
845
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000846 err = gfar_parse_group(child, priv, model);
847 if (err)
848 goto err_grp_init;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800849 }
Claudiu Manoilb338ce22014-03-11 18:01:24 +0200850 } else { /* SQ_SG_MODE */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000851 err = gfar_parse_group(np, priv, model);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000852 if (err)
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000853 goto err_grp_init;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800854 }
855
Saurabh Sengar3f8c0f72015-11-20 23:23:58 +0530856 if (of_property_read_bool(np, "bd-stash")) {
Andy Fleming4d7902f2009-02-04 16:43:44 -0800857 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
858 priv->bd_stash_en = 1;
859 }
860
Jingchang Lu55917642015-03-13 10:52:32 +0200861 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
Andy Fleming4d7902f2009-02-04 16:43:44 -0800862
Jingchang Lu55917642015-03-13 10:52:32 +0200863 if (err == 0)
864 priv->rx_stash_size = stash_len;
Andy Fleming4d7902f2009-02-04 16:43:44 -0800865
Jingchang Lu55917642015-03-13 10:52:32 +0200866 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
Andy Fleming4d7902f2009-02-04 16:43:44 -0800867
Jingchang Lu55917642015-03-13 10:52:32 +0200868 if (err == 0)
869 priv->rx_stash_index = stash_idx;
Andy Fleming4d7902f2009-02-04 16:43:44 -0800870
871 if (stash_len || stash_idx)
872 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
873
Andy Flemingb31a1d82008-12-16 15:29:15 -0800874 mac_addr = of_get_mac_address(np);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000875
Andy Flemingb31a1d82008-12-16 15:29:15 -0800876 if (mac_addr)
Joe Perches6a3c910c2011-11-16 09:38:02 +0000877 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800878
879 if (model && !strcasecmp(model, "TSEC"))
Claudiu Manoil34018fd2014-02-17 12:53:15 +0200880 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000881 FSL_GIANFAR_DEV_HAS_COALESCE |
882 FSL_GIANFAR_DEV_HAS_RMON |
883 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
884
Andy Flemingb31a1d82008-12-16 15:29:15 -0800885 if (model && !strcasecmp(model, "eTSEC"))
Claudiu Manoil34018fd2014-02-17 12:53:15 +0200886 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000887 FSL_GIANFAR_DEV_HAS_COALESCE |
888 FSL_GIANFAR_DEV_HAS_RMON |
889 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
Jan Ceuleersbc4598b2012-06-05 03:42:13 +0000890 FSL_GIANFAR_DEV_HAS_CSUM |
891 FSL_GIANFAR_DEV_HAS_VLAN |
892 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
893 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
Hamish Martin7bff47d2015-12-15 14:14:50 +1300894 FSL_GIANFAR_DEV_HAS_TIMER |
895 FSL_GIANFAR_DEV_HAS_RX_FILER;
Andy Flemingb31a1d82008-12-16 15:29:15 -0800896
Jingchang Lu55917642015-03-13 10:52:32 +0200897 err = of_property_read_string(np, "phy-connection-type", &ctype);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800898
899 /* We only care about rgmii-id. The rest are autodetected */
Jingchang Lu55917642015-03-13 10:52:32 +0200900 if (err == 0 && !strcmp(ctype, "rgmii-id"))
Andy Flemingb31a1d82008-12-16 15:29:15 -0800901 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
902 else
903 priv->interface = PHY_INTERFACE_MODE_MII;
904
Jingchang Lu55917642015-03-13 10:52:32 +0200905 if (of_find_property(np, "fsl,magic-packet", NULL))
Andy Flemingb31a1d82008-12-16 15:29:15 -0800906 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
907
Claudiu Manoil3e905b82015-10-05 17:19:59 +0300908 if (of_get_property(np, "fsl,wake-on-filer", NULL))
909 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
910
Grant Likelyfe192a42009-04-25 12:53:12 +0000911 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800912
Florian Fainellibe403642014-05-22 09:47:48 -0700913 /* In the case of a fixed PHY, the DT node associated
914 * to the PHY is the Ethernet MAC DT node.
915 */
Uwe Kleine-König6f2c9bd2014-08-07 22:17:07 +0200916 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
Florian Fainellibe403642014-05-22 09:47:48 -0700917 err = of_phy_register_fixed_link(np);
918 if (err)
919 goto err_grp_init;
920
Uwe Kleine-König6f2c9bd2014-08-07 22:17:07 +0200921 priv->phy_node = of_node_get(np);
Florian Fainellibe403642014-05-22 09:47:48 -0700922 }
923
Andy Flemingb31a1d82008-12-16 15:29:15 -0800924 /* Find the TBI PHY. If it's not there, we don't support SGMII */
Grant Likelyfe192a42009-04-25 12:53:12 +0000925 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800926
927 return 0;
928
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000929err_grp_init:
930 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +0200931rx_alloc_failed:
932 gfar_free_rx_queues(priv);
933tx_alloc_failed:
934 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +0000935 free_gfar_dev(priv);
Andy Flemingb31a1d82008-12-16 15:29:15 -0800936 return err;
937}
938
Ben Hutchingsca0c88c2013-11-18 23:05:27 +0000939static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000940{
941 struct hwtstamp_config config;
942 struct gfar_private *priv = netdev_priv(netdev);
943
944 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
945 return -EFAULT;
946
947 /* reserved for future extensions */
948 if (config.flags)
949 return -EINVAL;
950
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +0000951 switch (config.tx_type) {
952 case HWTSTAMP_TX_OFF:
953 priv->hwts_tx_en = 0;
954 break;
955 case HWTSTAMP_TX_ON:
956 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
957 return -ERANGE;
958 priv->hwts_tx_en = 1;
959 break;
960 default:
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000961 return -ERANGE;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +0000962 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000963
964 switch (config.rx_filter) {
965 case HWTSTAMP_FILTER_NONE:
Manfred Rudigier97553f72010-06-11 01:49:05 +0000966 if (priv->hwts_rx_en) {
Manfred Rudigier97553f72010-06-11 01:49:05 +0000967 priv->hwts_rx_en = 0;
Claudiu Manoil08511332014-02-24 12:13:45 +0200968 reset_gfar(netdev);
Manfred Rudigier97553f72010-06-11 01:49:05 +0000969 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000970 break;
971 default:
972 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
973 return -ERANGE;
Manfred Rudigier97553f72010-06-11 01:49:05 +0000974 if (!priv->hwts_rx_en) {
Manfred Rudigier97553f72010-06-11 01:49:05 +0000975 priv->hwts_rx_en = 1;
Claudiu Manoil08511332014-02-24 12:13:45 +0200976 reset_gfar(netdev);
Manfred Rudigier97553f72010-06-11 01:49:05 +0000977 }
Manfred Rudigiercc772ab2010-04-08 23:10:03 +0000978 config.rx_filter = HWTSTAMP_FILTER_ALL;
979 break;
980 }
981
982 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
983 -EFAULT : 0;
984}
985
Ben Hutchingsca0c88c2013-11-18 23:05:27 +0000986static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
987{
988 struct hwtstamp_config config;
989 struct gfar_private *priv = netdev_priv(netdev);
990
991 config.flags = 0;
992 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
993 config.rx_filter = (priv->hwts_rx_en ?
994 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
995
996 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
997 -EFAULT : 0;
998}
999
Clifford Wolf0faac9f2009-01-09 10:23:11 +00001000static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1001{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001002 struct phy_device *phydev = dev->phydev;
Clifford Wolf0faac9f2009-01-09 10:23:11 +00001003
1004 if (!netif_running(dev))
1005 return -EINVAL;
1006
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001007 if (cmd == SIOCSHWTSTAMP)
Ben Hutchingsca0c88c2013-11-18 23:05:27 +00001008 return gfar_hwtstamp_set(dev, rq);
1009 if (cmd == SIOCGHWTSTAMP)
1010 return gfar_hwtstamp_get(dev, rq);
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001011
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001012 if (!phydev)
Clifford Wolf0faac9f2009-01-09 10:23:11 +00001013 return -ENODEV;
1014
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001015 return phy_mii_ioctl(phydev, rq, cmd);
Clifford Wolf0faac9f2009-01-09 10:23:11 +00001016}
1017
Anton Vorontsov18294ad2009-11-04 12:53:00 +00001018static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1019 u32 class)
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001020{
1021 u32 rqfpr = FPR_FILER_MASK;
1022 u32 rqfcr = 0x0;
1023
1024 rqfar--;
1025 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +00001026 priv->ftp_rqfpr[rqfar] = rqfpr;
1027 priv->ftp_rqfcr[rqfar] = rqfcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001028 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1029
1030 rqfar--;
1031 rqfcr = RQFCR_CMP_NOMATCH;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +00001032 priv->ftp_rqfpr[rqfar] = rqfpr;
1033 priv->ftp_rqfcr[rqfar] = rqfcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001034 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1035
1036 rqfar--;
1037 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1038 rqfpr = class;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +00001039 priv->ftp_rqfcr[rqfar] = rqfcr;
1040 priv->ftp_rqfpr[rqfar] = rqfpr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001041 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1042
1043 rqfar--;
1044 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1045 rqfpr = class;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +00001046 priv->ftp_rqfcr[rqfar] = rqfcr;
1047 priv->ftp_rqfpr[rqfar] = rqfpr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001048 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1049
1050 return rqfar;
1051}
1052
1053static void gfar_init_filer_table(struct gfar_private *priv)
1054{
1055 int i = 0x0;
1056 u32 rqfar = MAX_FILER_IDX;
1057 u32 rqfcr = 0x0;
1058 u32 rqfpr = FPR_FILER_MASK;
1059
1060 /* Default rule */
1061 rqfcr = RQFCR_CMP_MATCH;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +00001062 priv->ftp_rqfcr[rqfar] = rqfcr;
1063 priv->ftp_rqfpr[rqfar] = rqfpr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001064 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1065
1066 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1067 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1068 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1069 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1070 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1071 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1072
Uwe Kleine-König85dd08e2010-06-11 12:16:55 +02001073 /* cur_filer_idx indicated the first non-masked rule */
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001074 priv->cur_filer_idx = rqfar;
1075
1076 /* Rest are masked rules */
1077 rqfcr = RQFCR_CMP_NOMATCH;
1078 for (i = 0; i < rqfar; i++) {
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +00001079 priv->ftp_rqfcr[i] = rqfcr;
1080 priv->ftp_rqfpr[i] = rqfpr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001081 gfar_write_filer(priv, i, rqfcr, rqfpr);
1082 }
1083}
1084
Claudiu Manoild6ef0bc2014-10-07 10:44:32 +03001085#ifdef CONFIG_PPC
Claudiu Manoil2969b1f2013-10-09 20:20:41 +03001086static void __gfar_detect_errata_83xx(struct gfar_private *priv)
Anton Vorontsov7d350972010-06-30 06:39:12 +00001087{
Anton Vorontsov7d350972010-06-30 06:39:12 +00001088 unsigned int pvr = mfspr(SPRN_PVR);
1089 unsigned int svr = mfspr(SPRN_SVR);
1090 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1091 unsigned int rev = svr & 0xffff;
1092
1093 /* MPC8313 Rev 2.0 and higher; All MPC837x */
1094 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001095 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
Anton Vorontsov7d350972010-06-30 06:39:12 +00001096 priv->errata |= GFAR_ERRATA_74;
1097
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00001098 /* MPC8313 and MPC837x all rev */
1099 if ((pvr == 0x80850010 && mod == 0x80b0) ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001100 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
Anton Vorontsovdeb90ea2010-06-30 06:39:13 +00001101 priv->errata |= GFAR_ERRATA_76;
1102
Claudiu Manoil2969b1f2013-10-09 20:20:41 +03001103 /* MPC8313 Rev < 2.0 */
1104 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00001105 priv->errata |= GFAR_ERRATA_12;
Claudiu Manoil2969b1f2013-10-09 20:20:41 +03001106}
1107
1108static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1109{
1110 unsigned int svr = mfspr(SPRN_SVR);
1111
1112 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1113 priv->errata |= GFAR_ERRATA_12;
Atsushi Nemoto7bfc6082016-03-03 09:07:51 +09001114 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
Claudiu Manoil53fad772013-10-09 20:20:42 +03001115 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
Atsushi Nemoto7bfc6082016-03-03 09:07:51 +09001116 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
1117 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
Claudiu Manoil53fad772013-10-09 20:20:42 +03001118 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
Claudiu Manoil2969b1f2013-10-09 20:20:41 +03001119}
Claudiu Manoild6ef0bc2014-10-07 10:44:32 +03001120#endif
Claudiu Manoil2969b1f2013-10-09 20:20:41 +03001121
1122static void gfar_detect_errata(struct gfar_private *priv)
1123{
1124 struct device *dev = &priv->ofdev->dev;
1125
1126 /* no plans to fix */
1127 priv->errata |= GFAR_ERRATA_A002;
1128
Claudiu Manoild6ef0bc2014-10-07 10:44:32 +03001129#ifdef CONFIG_PPC
Claudiu Manoil2969b1f2013-10-09 20:20:41 +03001130 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1131 __gfar_detect_errata_85xx(priv);
1132 else /* non-mpc85xx parts, i.e. e300 core based */
1133 __gfar_detect_errata_83xx(priv);
Claudiu Manoild6ef0bc2014-10-07 10:44:32 +03001134#endif
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00001135
Anton Vorontsov7d350972010-06-30 06:39:12 +00001136 if (priv->errata)
1137 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1138 priv->errata);
1139}
1140
Claudiu Manoil08511332014-02-24 12:13:45 +02001141void gfar_mac_reset(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142{
Claudiu Manoil20862782014-02-17 12:53:14 +02001143 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Claudiu Manoila328ac92014-02-24 12:13:42 +02001144 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145
1146 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001147 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148
Andy Flemingb98ac702009-02-04 16:38:05 -08001149 /* We need to delay at least 3 TX clocks */
Claudiu Manoila328ac92014-02-24 12:13:42 +02001150 udelay(3);
Andy Flemingb98ac702009-02-04 16:38:05 -08001151
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001152 /* the soft reset bit is not self-resetting, so we need to
1153 * clear it before resuming normal operation
1154 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001155 gfar_write(&regs->maccfg1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Claudiu Manoila328ac92014-02-24 12:13:42 +02001157 udelay(3);
1158
Claudiu Manoil75354142015-07-13 16:22:06 +03001159 gfar_rx_offload_en(priv);
Claudiu Manoil88302642014-02-24 12:13:43 +02001160
1161 /* Initialize the max receive frame/buffer lengths */
Claudiu Manoil75354142015-07-13 16:22:06 +03001162 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
1163 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
Claudiu Manoila328ac92014-02-24 12:13:42 +02001164
1165 /* Initialize the Minimum Frame Length Register */
1166 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1167
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 /* Initialize MACCFG2. */
Anton Vorontsov7d350972010-06-30 06:39:12 +00001169 tempval = MACCFG2_INIT_SETTINGS;
Claudiu Manoil88302642014-02-24 12:13:43 +02001170
Claudiu Manoil75354142015-07-13 16:22:06 +03001171 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
1172 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
1173 * and by checking RxBD[LG] and discarding larger than MAXFRM.
Claudiu Manoil88302642014-02-24 12:13:43 +02001174 */
Claudiu Manoil75354142015-07-13 16:22:06 +03001175 if (gfar_has_errata(priv, GFAR_ERRATA_74))
Anton Vorontsov7d350972010-06-30 06:39:12 +00001176 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
Claudiu Manoil88302642014-02-24 12:13:43 +02001177
Anton Vorontsov7d350972010-06-30 06:39:12 +00001178 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Claudiu Manoila328ac92014-02-24 12:13:42 +02001180 /* Clear mac addr hash registers */
1181 gfar_write(&regs->igaddr0, 0);
1182 gfar_write(&regs->igaddr1, 0);
1183 gfar_write(&regs->igaddr2, 0);
1184 gfar_write(&regs->igaddr3, 0);
1185 gfar_write(&regs->igaddr4, 0);
1186 gfar_write(&regs->igaddr5, 0);
1187 gfar_write(&regs->igaddr6, 0);
1188 gfar_write(&regs->igaddr7, 0);
1189
1190 gfar_write(&regs->gaddr0, 0);
1191 gfar_write(&regs->gaddr1, 0);
1192 gfar_write(&regs->gaddr2, 0);
1193 gfar_write(&regs->gaddr3, 0);
1194 gfar_write(&regs->gaddr4, 0);
1195 gfar_write(&regs->gaddr5, 0);
1196 gfar_write(&regs->gaddr6, 0);
1197 gfar_write(&regs->gaddr7, 0);
1198
1199 if (priv->extended_hash)
1200 gfar_clear_exact_match(priv->ndev);
1201
1202 gfar_mac_rx_config(priv);
1203
1204 gfar_mac_tx_config(priv);
1205
1206 gfar_set_mac_address(priv->ndev);
1207
1208 gfar_set_multi(priv->ndev);
1209
1210 /* clear ievent and imask before configuring coalescing */
1211 gfar_ints_disable(priv);
1212
1213 /* Configure the coalescing support */
1214 gfar_configure_coalescing_all(priv);
1215}
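/* Illustrative sketch, not part of the driver: the soft-reset pulse at
 * the top of gfar_mac_reset() extracted as a pattern. Because the reset
 * bit is not self-clearing, it must be written back to zero explicitly,
 * with a delay of at least 3 Tx clocks on either side.
 */
static void example_soft_reset_pulse(u32 __iomem *maccfg1)
{
	gfar_write(maccfg1, MACCFG1_SOFT_RESET);	/* assert reset */
	udelay(3);					/* >= 3 Tx clocks */
	gfar_write(maccfg1, 0);				/* not self-clearing */
	udelay(3);					/* let it settle */
}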
1216
1217static void gfar_hw_init(struct gfar_private *priv)
1218{
1219 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1220 u32 attrs;
1221
1222 /* Stop the DMA engine now, in case it was running before
1223 * (The firmware could have used it, and left it running).
1224 */
1225 gfar_halt(priv);
1226
1227 gfar_mac_reset(priv);
1228
1229	/* Zero out the rmon mib registers if the device has them */
1230 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1231 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1232
1233 /* Mask off the CAM interrupts */
1234 gfar_write(&regs->rmon.cam1, 0xffffffff);
1235 gfar_write(&regs->rmon.cam2, 0xffffffff);
1236 }
1237
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001239 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
Claudiu Manoil34018fd2014-02-17 12:53:15 +02001241 /* Set the extraction length and index */
1242 attrs = ATTRELI_EL(priv->rx_stash_size) |
1243 ATTRELI_EI(priv->rx_stash_index);
1244
1245 gfar_write(&regs->attreli, attrs);
1246
1247 /* Start with defaults, and add stashing
1248 * depending on driver parameters
1249 */
1250 attrs = ATTR_INIT_SETTINGS;
1251
1252 if (priv->bd_stash_en)
1253 attrs |= ATTR_BDSTASH;
1254
1255 if (priv->rx_stash_size != 0)
1256 attrs |= ATTR_BUFSTASH;
1257
1258 gfar_write(&regs->attr, attrs);
1259
1260 /* FIFO configs */
1261 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1262 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1263 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1264
Claudiu Manoil20862782014-02-17 12:53:14 +02001265 /* Program the interrupt steering regs, only for MG devices */
1266 if (priv->num_grps > 1)
1267 gfar_write_isrg(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001268}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269
Xiubo Li898157e2014-06-04 16:49:16 +08001270static void gfar_init_addr_hash_table(struct gfar_private *priv)
Claudiu Manoil20862782014-02-17 12:53:14 +02001271{
1272 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001273
Andy Flemingb31a1d82008-12-16 15:29:15 -08001274 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001275 priv->extended_hash = 1;
1276 priv->hash_width = 9;
1277
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001278 priv->hash_regs[0] = &regs->igaddr0;
1279 priv->hash_regs[1] = &regs->igaddr1;
1280 priv->hash_regs[2] = &regs->igaddr2;
1281 priv->hash_regs[3] = &regs->igaddr3;
1282 priv->hash_regs[4] = &regs->igaddr4;
1283 priv->hash_regs[5] = &regs->igaddr5;
1284 priv->hash_regs[6] = &regs->igaddr6;
1285 priv->hash_regs[7] = &regs->igaddr7;
1286 priv->hash_regs[8] = &regs->gaddr0;
1287 priv->hash_regs[9] = &regs->gaddr1;
1288 priv->hash_regs[10] = &regs->gaddr2;
1289 priv->hash_regs[11] = &regs->gaddr3;
1290 priv->hash_regs[12] = &regs->gaddr4;
1291 priv->hash_regs[13] = &regs->gaddr5;
1292 priv->hash_regs[14] = &regs->gaddr6;
1293 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001294
1295 } else {
1296 priv->extended_hash = 0;
1297 priv->hash_width = 8;
1298
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001299 priv->hash_regs[0] = &regs->gaddr0;
1300 priv->hash_regs[1] = &regs->gaddr1;
1301 priv->hash_regs[2] = &regs->gaddr2;
1302 priv->hash_regs[3] = &regs->gaddr3;
1303 priv->hash_regs[4] = &regs->gaddr4;
1304 priv->hash_regs[5] = &regs->gaddr5;
1305 priv->hash_regs[6] = &regs->gaddr6;
1306 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001307 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001308}
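/* Illustrative sketch, not part of the driver: mapping a hash value onto
 * the hash_regs[] array populated above. With 32-bit registers, the low
 * five bits of the hash pick a bit and the remaining bits pick a
 * register; the exact bit ordering the hardware expects may differ from
 * this simplified form.
 */
static void example_set_hash_bit(u32 __iomem *hash_regs[], u32 hash)
{
	u32 idx = hash >> 5;			/* which 32-bit register */
	u32 bit = 1 << (hash & 0x1f);		/* which bit within it */

	gfar_write(hash_regs[idx], gfar_read(hash_regs[idx]) | bit);
}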
1309
1310/* Set up the ethernet device structure, private data,
1311 * and anything else we need before we start
1312 */
1313static int gfar_probe(struct platform_device *ofdev)
1314{
Johan Hovold42c70042016-11-28 19:25:02 +01001315 struct device_node *np = ofdev->dev.of_node;
Claudiu Manoil20862782014-02-17 12:53:14 +02001316 struct net_device *dev = NULL;
1317 struct gfar_private *priv = NULL;
1318 int err = 0, i;
1319
1320 err = gfar_of_init(ofdev, &dev);
1321
1322 if (err)
1323 return err;
1324
1325 priv = netdev_priv(dev);
1326 priv->ndev = dev;
1327 priv->ofdev = ofdev;
1328 priv->dev = &ofdev->dev;
1329 SET_NETDEV_DEV(dev, &ofdev->dev);
1330
Claudiu Manoil20862782014-02-17 12:53:14 +02001331 INIT_WORK(&priv->reset_task, gfar_reset_task);
1332
1333 platform_set_drvdata(ofdev, priv);
1334
1335 gfar_detect_errata(priv);
1336
Claudiu Manoil20862782014-02-17 12:53:14 +02001337 /* Set the dev->base_addr to the gfar reg region */
1338 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1339
1340 /* Fill in the dev structure */
1341 dev->watchdog_timeo = TX_TIMEOUT;
Jarod Wilson44770e12016-10-17 15:54:17 -04001342 /* MTU range: 50 - 9586 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001343 dev->mtu = 1500;
Jarod Wilson44770e12016-10-17 15:54:17 -04001344 dev->min_mtu = 50;
1345 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
Claudiu Manoil20862782014-02-17 12:53:14 +02001346 dev->netdev_ops = &gfar_netdev_ops;
1347 dev->ethtool_ops = &gfar_ethtool_ops;
1348
1349	/* Register for NAPI; we are registering NAPI for each group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001350 for (i = 0; i < priv->num_grps; i++) {
1351 if (priv->poll_mode == GFAR_SQ_POLLING) {
1352 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1353 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001354 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001355 gfar_poll_tx_sq, 2);
1356 } else {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001357 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1358 gfar_poll_rx, GFAR_DEV_WEIGHT);
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001359 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001360 gfar_poll_tx, 2);
1361 }
1362 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001363
1364 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1365 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1366 NETIF_F_RXCSUM;
1367 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1368 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1369 }
1370
1371 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1372 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1373 NETIF_F_HW_VLAN_CTAG_RX;
1374 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1375 }
1376
Claudiu Manoil3d23a052015-05-06 18:07:30 +03001377 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1378
Claudiu Manoil20862782014-02-17 12:53:14 +02001379 gfar_init_addr_hash_table(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001380
Claudiu Manoil532c37b2014-02-17 12:53:16 +02001381 /* Insert receive time stamps into padding alignment bytes */
1382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1383 priv->padding = 8;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001384
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001385 if (dev->features & NETIF_F_IP_CSUM ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001386 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001387 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001389 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001390 for (i = 0; i < priv->num_tx_queues; i++) {
1391 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1392 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1393 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1394 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1395 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001396
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001397 for (i = 0; i < priv->num_rx_queues; i++) {
1398 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1399 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1400 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402
Hamish Martin7bff47d2015-12-15 14:14:50 +13001403 /* Always enable rx filer if available */
1404 priv->rx_filer_enable =
1405 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001406 /* Enable most messages by default */
1407	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +00001408	/* use priority h/w tx queue scheduling for single queue devices */
1409 if (priv->num_tx_queues == 1)
1410 priv->prio_sched_en = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001411
Claudiu Manoil08511332014-02-24 12:13:45 +02001412 set_bit(GFAR_DOWN, &priv->state);
1413
Claudiu Manoila328ac92014-02-24 12:13:42 +02001414 gfar_hw_init(priv);
Trent Piephod3eab822008-10-02 11:12:24 +00001415
Fabio Estevamd4c642e2014-06-03 19:55:38 -03001416 /* Carrier starts down, phylib will bring it up */
1417 netif_carrier_off(dev);
1418
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 err = register_netdev(dev);
1420
1421 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001422 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 goto register_fail;
1424 }
1425
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001426 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
1427 priv->wol_supported |= GFAR_WOL_MAGIC;
1428
1429 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
1430 priv->rx_filer_enable)
1431 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
1432
1433 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001434
Dai Harukic50a5d92008-12-17 16:51:32 -08001435 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001436 for (i = 0; i < priv->num_grps; i++) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001437 struct gfar_priv_grp *grp = &priv->gfargrp[i];
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001438 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001439 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001440 dev->name, "_g", '0' + i, "_tx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001441 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001442 dev->name, "_g", '0' + i, "_rx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001443 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001444 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001445 } else
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001446 strcpy(gfar_irq(grp, TX)->name, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001447 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001448
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001449 /* Initialize the filer table */
1450 gfar_init_filer_table(priv);
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001453 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
Jan Ceuleers0977f812012-06-05 03:42:12 +00001455 /* Even more device info helps when determining which kernel
1456 * provided which set of benchmarks.
1457 */
Joe Perches59deab22011-06-14 08:57:47 +00001458 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001459 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001460 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1461 i, priv->rx_queue[i]->rx_ring_size);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001462 for (i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001463 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1464 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
1466 return 0;
1467
1468register_fail:
Johan Hovold42c70042016-11-28 19:25:02 +01001469 if (of_phy_is_fixed_link(np))
1470 of_phy_deregister_fixed_link(np);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001471 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001472 gfar_free_rx_queues(priv);
1473 gfar_free_tx_queues(priv);
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001474 of_node_put(priv->phy_node);
1475 of_node_put(priv->tbi_node);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001476 free_gfar_dev(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001477 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478}
1479
Grant Likely2dc11582010-08-06 09:25:50 -06001480static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001482 struct gfar_private *priv = platform_get_drvdata(ofdev);
Johan Hovold42c70042016-11-28 19:25:02 +01001483 struct device_node *np = ofdev->dev.of_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001485 of_node_put(priv->phy_node);
1486 of_node_put(priv->tbi_node);
Grant Likelyfe192a42009-04-25 12:53:12 +00001487
David S. Millerd9d8e042009-09-06 01:41:02 -07001488 unregister_netdev(priv->ndev);
Johan Hovold42c70042016-11-28 19:25:02 +01001489
1490 if (of_phy_is_fixed_link(np))
1491 of_phy_deregister_fixed_link(np);
1492
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001493 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001494 gfar_free_rx_queues(priv);
1495 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001496 free_gfar_dev(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
1498 return 0;
1499}
1500
Scott Woodd87eb122008-07-11 18:04:45 -05001501#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001502
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001503static void __gfar_filer_disable(struct gfar_private *priv)
1504{
1505 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1506 u32 temp;
1507
1508 temp = gfar_read(&regs->rctrl);
1509 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
1510 gfar_write(&regs->rctrl, temp);
1511}
1512
1513static void __gfar_filer_enable(struct gfar_private *priv)
1514{
1515 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1516 u32 temp;
1517
1518 temp = gfar_read(&regs->rctrl);
1519 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
1520 gfar_write(&regs->rctrl, temp);
1521}
1522
1523/* Filer rules implementing wol capabilities */
1524static void gfar_filer_config_wol(struct gfar_private *priv)
1525{
1526 unsigned int i;
1527 u32 rqfcr;
1528
1529 __gfar_filer_disable(priv);
1530
1531 /* clear the filer table, reject any packet by default */
1532 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
1533 for (i = 0; i <= MAX_FILER_IDX; i++)
1534 gfar_write_filer(priv, i, rqfcr, 0);
1535
1536 i = 0;
1537 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
1538 /* unicast packet, accept it */
1539 struct net_device *ndev = priv->ndev;
1540 /* get the default rx queue index */
1541 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
1542 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
1543 (ndev->dev_addr[1] << 8) |
1544 ndev->dev_addr[2];
1545
1546 rqfcr = (qindex << 10) | RQFCR_AND |
1547 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
1548
1549 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1550
1551 dest_mac_addr = (ndev->dev_addr[3] << 16) |
1552 (ndev->dev_addr[4] << 8) |
1553 ndev->dev_addr[5];
1554 rqfcr = (qindex << 10) | RQFCR_GPI |
1555 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
1556 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
1557 }
1558
1559 __gfar_filer_enable(priv);
1560}
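/* Illustrative sketch, not part of the driver: the destination-address
 * split used by gfar_filer_config_wol() above. The filer matches the
 * 6-byte MAC address as two 24-bit halves, DAH (bytes 0-2) and DAL
 * (bytes 3-5), each needing its own rule.
 */
static void example_mac_to_dah_dal(const u8 *mac, u32 *dah, u32 *dal)
{
	*dah = (mac[0] << 16) | (mac[1] << 8) | mac[2];
	*dal = (mac[3] << 16) | (mac[4] << 8) | mac[5];
}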
1561
1562static void gfar_filer_restore_table(struct gfar_private *priv)
1563{
1564 u32 rqfcr, rqfpr;
1565 unsigned int i;
1566
1567 __gfar_filer_disable(priv);
1568
1569 for (i = 0; i <= MAX_FILER_IDX; i++) {
1570 rqfcr = priv->ftp_rqfcr[i];
1571 rqfpr = priv->ftp_rqfpr[i];
1572 gfar_write_filer(priv, i, rqfcr, rqfpr);
1573 }
1574
1575 __gfar_filer_enable(priv);
1576}
1577
1578/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
1579static void gfar_start_wol_filer(struct gfar_private *priv)
1580{
1581 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1582 u32 tempval;
1583 int i = 0;
1584
1585 /* Enable Rx hw queues */
1586 gfar_write(&regs->rqueue, priv->rqueue);
1587
1588 /* Initialize DMACTRL to have WWR and WOP */
1589 tempval = gfar_read(&regs->dmactrl);
1590 tempval |= DMACTRL_INIT_SETTINGS;
1591 gfar_write(&regs->dmactrl, tempval);
1592
1593 /* Make sure we aren't stopped */
1594 tempval = gfar_read(&regs->dmactrl);
1595 tempval &= ~DMACTRL_GRS;
1596 gfar_write(&regs->dmactrl, tempval);
1597
1598 for (i = 0; i < priv->num_grps; i++) {
1599 regs = priv->gfargrp[i].regs;
1600 /* Clear RHLT, so that the DMA starts polling now */
1601 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1602 /* enable the Filer General Purpose Interrupt */
1603 gfar_write(&regs->imask, IMASK_FGPI);
1604 }
1605
1606 /* Enable Rx DMA */
1607 tempval = gfar_read(&regs->maccfg1);
1608 tempval |= MACCFG1_RX_EN;
1609 gfar_write(&regs->maccfg1, tempval);
1610}
1611
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001612static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001613{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001614 struct gfar_private *priv = dev_get_drvdata(dev);
1615 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001616 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001617 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001618 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001619
Claudiu Manoil614b4242015-07-31 18:38:32 +03001620 if (!netif_running(ndev))
1621 return 0;
1622
1623 disable_napi(priv);
1624 netif_tx_lock(ndev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001625 netif_device_detach(ndev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001626 netif_tx_unlock(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001627
Claudiu Manoil614b4242015-07-31 18:38:32 +03001628 gfar_halt(priv);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001629
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001630 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001631 /* Enable interrupt on Magic Packet */
1632 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001633
Claudiu Manoil614b4242015-07-31 18:38:32 +03001634 /* Enable Magic Packet mode */
1635 tempval = gfar_read(&regs->maccfg2);
1636 tempval |= MACCFG2_MPEN;
1637 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001638
Claudiu Manoil614b4242015-07-31 18:38:32 +03001639 /* re-enable the Rx block */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001640 tempval = gfar_read(&regs->maccfg1);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001641 tempval |= MACCFG1_RX_EN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001642 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001643
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001644 } else if (wol & GFAR_WOL_FILER_UCAST) {
1645 gfar_filer_config_wol(priv);
1646 gfar_start_wol_filer(priv);
1647
Claudiu Manoil614b4242015-07-31 18:38:32 +03001648 } else {
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001649 phy_stop(ndev->phydev);
Scott Woodd87eb122008-07-11 18:04:45 -05001650 }
1651
1652 return 0;
1653}
1654
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001655static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001656{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001657 struct gfar_private *priv = dev_get_drvdata(dev);
1658 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001659 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001660 u32 tempval;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001661 u16 wol = priv->wol_opts;
Scott Woodd87eb122008-07-11 18:04:45 -05001662
Claudiu Manoil614b4242015-07-31 18:38:32 +03001663 if (!netif_running(ndev))
Scott Woodd87eb122008-07-11 18:04:45 -05001664 return 0;
Scott Woodd87eb122008-07-11 18:04:45 -05001665
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001666 if (wol & GFAR_WOL_MAGIC) {
Claudiu Manoil614b4242015-07-31 18:38:32 +03001667 /* Disable Magic Packet mode */
1668 tempval = gfar_read(&regs->maccfg2);
1669 tempval &= ~MACCFG2_MPEN;
1670 gfar_write(&regs->maccfg2, tempval);
Claudiu Manoil3e905b82015-10-05 17:19:59 +03001671
1672 } else if (wol & GFAR_WOL_FILER_UCAST) {
1673 /* need to stop rx only, tx is already down */
1674 gfar_halt(priv);
1675 gfar_filer_restore_table(priv);
1676
Claudiu Manoil614b4242015-07-31 18:38:32 +03001677 } else {
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001678 phy_start(ndev->phydev);
Claudiu Manoil614b4242015-07-31 18:38:32 +03001679 }
Scott Woodd87eb122008-07-11 18:04:45 -05001680
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001681 gfar_start(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001682
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001683 netif_device_attach(ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001684 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001685
1686 return 0;
1687}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001688
1689static int gfar_restore(struct device *dev)
1690{
1691 struct gfar_private *priv = dev_get_drvdata(dev);
1692 struct net_device *ndev = priv->ndev;
1693
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001694 if (!netif_running(ndev)) {
1695 netif_device_attach(ndev);
1696
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001697 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001698 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001699
Claudiu Manoil76f31e82015-07-13 16:22:03 +03001700 gfar_init_bds(ndev);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001701
Claudiu Manoila328ac92014-02-24 12:13:42 +02001702 gfar_mac_reset(priv);
1703
1704 gfar_init_tx_rx_base(priv);
1705
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001706 gfar_start(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001707
1708 priv->oldlink = 0;
1709 priv->oldspeed = 0;
1710 priv->oldduplex = -1;
1711
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001712 if (ndev->phydev)
1713 phy_start(ndev->phydev);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001714
1715 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001716 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001717
1718 return 0;
1719}
1720
1721static struct dev_pm_ops gfar_pm_ops = {
1722 .suspend = gfar_suspend,
1723 .resume = gfar_resume,
1724 .freeze = gfar_suspend,
1725 .thaw = gfar_resume,
1726 .restore = gfar_restore,
1727};
1728
1729#define GFAR_PM_OPS (&gfar_pm_ops)
1730
Scott Woodd87eb122008-07-11 18:04:45 -05001731#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001732
1733#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001734
Scott Woodd87eb122008-07-11 18:04:45 -05001735#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001737/* Reads the controller's registers to determine what interface
1738 * connects it to the PHY.
1739 */
1740static phy_interface_t gfar_get_interface(struct net_device *dev)
1741{
1742 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001743 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001744 u32 ecntrl;
1745
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001746 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001747
1748 if (ecntrl & ECNTRL_SGMII_MODE)
1749 return PHY_INTERFACE_MODE_SGMII;
1750
1751 if (ecntrl & ECNTRL_TBI_MODE) {
1752 if (ecntrl & ECNTRL_REDUCED_MODE)
1753 return PHY_INTERFACE_MODE_RTBI;
1754 else
1755 return PHY_INTERFACE_MODE_TBI;
1756 }
1757
1758 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001759 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001760 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001761 }
Andy Fleming7132ab72007-07-11 11:43:07 -05001762 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001763 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001764
Jan Ceuleers0977f812012-06-05 03:42:12 +00001765 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001766 * be set by the device tree or platform code.
1767 */
1768 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1769 return PHY_INTERFACE_MODE_RGMII_ID;
1770
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001771 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001772 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001773 }
1774
Andy Flemingb31a1d82008-12-16 15:29:15 -08001775 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001776 return PHY_INTERFACE_MODE_GMII;
1777
1778 return PHY_INTERFACE_MODE_MII;
1779}
1780
1781
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001782/* Initializes driver's PHY state, and attaches to the PHY.
1783 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 */
1785static int init_phy(struct net_device *dev)
1786{
1787 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001788 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001789 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001790 GFAR_SUPPORTED_GBIT : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001791 phy_interface_t interface;
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001792 struct phy_device *phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793
1794 priv->oldlink = 0;
1795 priv->oldspeed = 0;
1796 priv->oldduplex = -1;
1797
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001798 interface = gfar_get_interface(dev);
1799
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001800 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1801 interface);
1802 if (!phydev) {
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001803 dev_err(&dev->dev, "could not attach to PHY\n");
1804 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001805 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
Kapil Junejad3c12872007-05-11 18:25:11 -05001807 if (interface == PHY_INTERFACE_MODE_SGMII)
1808 gfar_configure_serdes(dev);
1809
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001810 /* Remove any features not supported by the controller */
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001811 phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1812 phydev->advertising = phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001814 /* Add support for flow control, but don't advertise it by default */
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001815 phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001816
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
Jan Ceuleers0977f812012-06-05 03:42:12 +00001820/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001821 * SERDES lynx PHY on the chip. We communicate with this PHY
1822 * through the MDIO bus on each controller, treating it as a
1823 * "normal" PHY at the address found in the TBIPA register. We assume
1824 * that the TBIPA register is valid. Either the MDIO bus code will set
1825 * it to a value that doesn't conflict with other PHYs on the bus, or the
1826 * value doesn't matter, as there are no other PHYs on the bus.
1827 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001828static void gfar_configure_serdes(struct net_device *dev)
1829{
1830 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001831 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001832
Grant Likelyfe192a42009-04-25 12:53:12 +00001833 if (!priv->tbi_node) {
1834 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1835 "device tree specify a tbi-handle\n");
1836 return;
1837 }
1838
1839 tbiphy = of_phy_find_device(priv->tbi_node);
1840 if (!tbiphy) {
1841 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001842 return;
1843 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001844
Jan Ceuleers0977f812012-06-05 03:42:12 +00001845 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001846 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1847 * everything for us? Resetting it takes the link down and requires
1848 * several seconds for it to come back.
1849 */
Russell King38737e42015-09-24 20:36:28 +01001850 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
Andrew Lunne5a03bf2016-01-06 20:11:16 +01001851 put_device(&tbiphy->mdio.dev);
Andy Flemingb31a1d82008-12-16 15:29:15 -08001852 return;
Russell King38737e42015-09-24 20:36:28 +01001853 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001854
Paul Gortmakerd0313582008-04-17 00:08:10 -04001855	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001856 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001857
Grant Likelyfe192a42009-04-25 12:53:12 +00001858 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001859 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1860 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001861
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001862 phy_write(tbiphy, MII_BMCR,
1863 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1864 BMCR_SPEED1000);
Russell King04d53b22015-09-24 20:36:18 +01001865
Andrew Lunne5a03bf2016-01-06 20:11:16 +01001866 put_device(&tbiphy->mdio.dev);
Kapil Junejad3c12872007-05-11 18:25:11 -05001867}
1868
Anton Vorontsov511d9342010-06-30 06:39:15 +00001869static int __gfar_is_rx_idle(struct gfar_private *priv)
1870{
1871 u32 res;
1872
Jan Ceuleers0977f812012-06-05 03:42:12 +00001873	/* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001874 * actually wait for IEVENT_GRSC flag.
1875 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001876 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001877 return 0;
1878
Jan Ceuleers0977f812012-06-05 03:42:12 +00001879 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001880 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1881 * and the Rx can be safely reset.
1882 */
1883 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1884 res &= 0x7f807f80;
1885 if ((res & 0xffff) == (res >> 16))
1886 return 1;
1887
1888 return 0;
1889}
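/* Illustrative sketch, not part of the driver: the mask-and-compare trick
 * used by __gfar_is_rx_idle() above. The mask 0x7f807f80 keeps the same
 * 8-bit field (0x7f80) in each 16-bit half of the register, so the two
 * fields are equal exactly when the masked halves are equal.
 */
static int example_halves_match(u32 res)
{
	res &= 0x7f807f80;
	return (res & 0xffff) == (res >> 16);
}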
Kumar Gala0bbaf062005-06-20 10:54:21 -05001890
1891/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001892static void gfar_halt_nodisable(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893{
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001894 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 u32 tempval;
Claudiu Manoila4feee82014-10-07 10:44:34 +03001896 unsigned int timeout;
1897 int stopped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001899 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
Claudiu Manoila4feee82014-10-07 10:44:34 +03001901 if (gfar_is_dma_stopped(priv))
1902 return;
1903
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001905 tempval = gfar_read(&regs->dmactrl);
Claudiu Manoila4feee82014-10-07 10:44:34 +03001906 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1907 gfar_write(&regs->dmactrl, tempval);
Anton Vorontsov511d9342010-06-30 06:39:15 +00001908
Claudiu Manoila4feee82014-10-07 10:44:34 +03001909retry:
1910 timeout = 1000;
1911 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1912 cpu_relax();
1913 timeout--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 }
Claudiu Manoila4feee82014-10-07 10:44:34 +03001915
1916 if (!timeout)
1917 stopped = gfar_is_dma_stopped(priv);
1918
1919 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1920 !__gfar_is_rx_idle(priv))
1921 goto retry;
Scott Woodd87eb122008-07-11 18:04:45 -05001922}
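/* Illustrative sketch, not part of the driver: the bounded busy-wait in
 * gfar_halt_nodisable() as a generic pattern, with a hypothetical
 * is_stopped() callback standing in for gfar_is_dma_stopped(). One final
 * read after the loop avoids declaring a timeout on a stale result.
 */
static int example_poll_until_stopped(int (*is_stopped)(void *), void *arg,
				      unsigned int timeout)
{
	while (timeout--) {
		if (is_stopped(arg))
			return 1;
		cpu_relax();
	}

	return is_stopped(arg);	/* last chance after the timeout */
}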
Scott Woodd87eb122008-07-11 18:04:45 -05001923
1924/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001925void gfar_halt(struct gfar_private *priv)
Scott Woodd87eb122008-07-11 18:04:45 -05001926{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001927 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001928 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001930	/* Disable the Rx/Tx hw queues */
1931 gfar_write(&regs->rqueue, 0);
1932 gfar_write(&regs->tqueue, 0);
Scott Wood2a54adc2008-08-12 15:10:46 -05001933
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001934 mdelay(10);
1935
1936 gfar_halt_nodisable(priv);
1937
1938 /* Disable Rx/Tx DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 tempval = gfar_read(&regs->maccfg1);
1940 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1941 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001942}
1943
1944void stop_gfar(struct net_device *dev)
1945{
1946 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001947
Claudiu Manoil08511332014-02-24 12:13:45 +02001948 netif_tx_stop_all_queues(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001949
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001950 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02001951 set_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001952 smp_mb__after_atomic();
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001953
Claudiu Manoil08511332014-02-24 12:13:45 +02001954 disable_napi(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001955
Claudiu Manoil08511332014-02-24 12:13:45 +02001956 /* disable ints and gracefully shut down Rx/Tx DMA */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001957 gfar_halt(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02001959 phy_stop(dev->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001964static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001967 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001968 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001970 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001972 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1973 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001974 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
Claudiu Manoila7312d52015-03-13 10:36:28 +02001976 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1977 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001978 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001979 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001980 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001981 txbdp++;
Claudiu Manoila7312d52015-03-13 10:36:28 +02001982 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1983 be16_to_cpu(txbdp->length),
1984 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001986 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001987 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1988 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001990 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001991 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001992}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001994static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1995{
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001996 int i;
1997
Claudiu Manoil75354142015-07-13 16:22:06 +03001998 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1999
2000 if (rx_queue->skb)
2001 dev_kfree_skb(rx_queue->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002003 for (i = 0; i < rx_queue->rx_ring_size; i++) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002004 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
2005
Anton Vorontsove69edd22009-10-12 06:00:30 +00002006 rxbdp->lstatus = 0;
2007 rxbdp->bufPtr = 0;
2008 rxbdp++;
Claudiu Manoil75354142015-07-13 16:22:06 +03002009
2010 if (!rxb->page)
2011 continue;
2012
Arseny Solokha4af0e5b2017-01-29 19:52:20 +07002013 dma_unmap_page(rx_queue->dev, rxb->dma,
2014 PAGE_SIZE, DMA_FROM_DEVICE);
Claudiu Manoil75354142015-07-13 16:22:06 +03002015 __free_page(rxb->page);
2016
2017 rxb->page = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002019
2020 kfree(rx_queue->rx_buff);
2021 rx_queue->rx_buff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002022}
Anton Vorontsove69edd22009-10-12 06:00:30 +00002023
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002024/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00002025 * Then free tx_skbuff and rx_skbuff
2026 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002027static void free_skb_resources(struct gfar_private *priv)
2028{
2029 struct gfar_priv_tx_q *tx_queue = NULL;
2030 struct gfar_priv_rx_q *rx_queue = NULL;
2031 int i;
2032
2033 /* Go through all the buffer descriptors and free their data buffers */
2034 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002035 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002036
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002037 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002038 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002039 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002040 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002041 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002042 }
2043
2044 for (i = 0; i < priv->num_rx_queues; i++) {
2045 rx_queue = priv->rx_queue[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002046 if (rx_queue->rx_buff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002047 free_skb_rx_queue(rx_queue);
2048 }
2049
Claudiu Manoil369ec162013-02-14 05:00:02 +00002050 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002051 sizeof(struct txbd8) * priv->total_tx_ring_size +
2052 sizeof(struct rxbd8) * priv->total_rx_ring_size,
2053 priv->tx_queue[0]->tx_bd_base,
2054 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055}
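/* Illustrative sketch, not part of the driver: the sizing of the single
 * coherent region released by free_skb_resources() above. All Tx rings
 * are laid out first, followed by all Rx rings, so one allocation backs
 * every buffer descriptor ring.
 */
static size_t example_bd_region_size(size_t total_tx, size_t total_rx)
{
	return sizeof(struct txbd8) * total_tx +
	       sizeof(struct rxbd8) * total_rx;
}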
2056
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002057void gfar_start(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002058{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002059 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002060 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002061 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002062
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002063 /* Enable Rx/Tx hw queues */
2064 gfar_write(&regs->rqueue, priv->rqueue);
2065 gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002066
2067 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002068 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002069 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002070 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002071
Kumar Gala0bbaf062005-06-20 10:54:21 -05002072 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002073 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002074 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002075 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002076
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002077 for (i = 0; i < priv->num_grps; i++) {
2078 regs = priv->gfargrp[i].regs;
2079 /* Clear THLT/RHLT, so that the DMA starts polling now */
2080 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2081 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002082 }
Dai Haruki12dea572008-12-16 15:30:20 -08002083
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002084 /* Enable Rx/Tx DMA */
2085 tempval = gfar_read(&regs->maccfg1);
2086 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2087 gfar_write(&regs->maccfg1, tempval);
2088
Claudiu Manoilefeddce2014-02-17 12:53:17 +02002089 gfar_ints_enable(priv);
2090
Florian Westphal860e9532016-05-03 16:33:13 +02002091 netif_trans_update(priv->ndev); /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002092}
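/* Illustrative sketch, not part of the driver: the DMACTRL handshake that
 * gfar_start() above and gfar_halt_nodisable() share. A graceful stop is
 * requested by setting GRS/GTS and revoked by clearing them, always as a
 * read-modify-write of the same register.
 */
static void example_set_graceful_stop(struct gfar __iomem *regs, bool stop)
{
	u32 dmactrl = gfar_read(&regs->dmactrl);

	if (stop)
		dmactrl |= (DMACTRL_GRS | DMACTRL_GTS);
	else
		dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS);

	gfar_write(&regs->dmactrl, dmactrl);
}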
2093
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002094static void free_grp_irqs(struct gfar_priv_grp *grp)
2095{
2096 free_irq(gfar_irq(grp, TX)->irq, grp);
2097 free_irq(gfar_irq(grp, RX)->irq, grp);
2098 free_irq(gfar_irq(grp, ER)->irq, grp);
2099}
2100
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002101static int register_grp_irqs(struct gfar_priv_grp *grp)
2102{
2103 struct gfar_private *priv = grp->priv;
2104 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00002105 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00002108 * them. Otherwise, only register for the one
2109 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002110 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002111 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00002112 * Transmit, and Receive
2113 */
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002114 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002115 gfar_irq(grp, ER)->name, grp);
2116 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002117 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002118 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002119
Julia Lawall2145f1a2010-08-05 10:26:20 +00002120 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002122 enable_irq_wake(gfar_irq(grp, ER)->irq);
2123
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002124 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2125 gfar_irq(grp, TX)->name, grp);
2126 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002127 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002128 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 goto tx_irq_fail;
2130 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002131 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2132 gfar_irq(grp, RX)->name, grp);
2133 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002134 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002135 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 goto rx_irq_fail;
2137 }
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002138 enable_irq_wake(gfar_irq(grp, RX)->irq);
2139
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 } else {
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002141 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002142 gfar_irq(grp, TX)->name, grp);
2143 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002144 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002145 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 goto err_irq_fail;
2147 }
Sudeep Hollad5b8d642015-09-21 16:47:09 +01002148 enable_irq_wake(gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 }
2150
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002151 return 0;
2152
2153rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002154 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002155tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002156 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002157err_irq_fail:
2158 return err;
2159
2160}
2161
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002162static void gfar_free_irq(struct gfar_private *priv)
2163{
2164 int i;
2165
2166 /* Free the IRQs */
2167 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2168 for (i = 0; i < priv->num_grps; i++)
2169 free_grp_irqs(&priv->gfargrp[i]);
2170 } else {
2171 for (i = 0; i < priv->num_grps; i++)
2172 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2173 &priv->gfargrp[i]);
2174 }
2175}
2176
2177static int gfar_request_irq(struct gfar_private *priv)
2178{
2179 int err, i, j;
2180
2181 for (i = 0; i < priv->num_grps; i++) {
2182 err = register_grp_irqs(&priv->gfargrp[i]);
2183 if (err) {
2184 for (j = 0; j < i; j++)
2185 free_grp_irqs(&priv->gfargrp[j]);
2186 return err;
2187 }
2188 }
2189
2190 return 0;
2191}
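/* Illustrative sketch, not part of the driver: the unwind-on-failure
 * idiom used by gfar_request_irq() above, with hypothetical acquire()/
 * release() callbacks. When acquiring item i fails, items 0..i-1 are
 * released so the caller never sees a half-initialized set.
 */
static int example_acquire_all(int (*acquire)(int), void (*release)(int),
			       int count)
{
	int err, i, j;

	for (i = 0; i < count; i++) {
		err = acquire(i);
		if (err) {
			for (j = 0; j < i; j++)
				release(j);
			return err;
		}
	}

	return 0;
}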
2192
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002193/* Bring the controller up and running */
2194int startup_gfar(struct net_device *ndev)
2195{
2196 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002197 int err;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002198
Claudiu Manoila328ac92014-02-24 12:13:42 +02002199 gfar_mac_reset(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002200
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002201 err = gfar_alloc_skb_resources(ndev);
2202 if (err)
2203 return err;
2204
Claudiu Manoila328ac92014-02-24 12:13:42 +02002205 gfar_init_tx_rx_base(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002206
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002207 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002208 clear_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002209 smp_mb__after_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002210
2211 /* Start Rx/Tx DMA and enable the interrupts */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002212 gfar_start(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
Claudiu Manoil2a4eebf2015-08-13 16:50:37 +03002214 /* force link state update after mac reset */
2215 priv->oldlink = 0;
2216 priv->oldspeed = 0;
2217 priv->oldduplex = -1;
2218
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02002219 phy_start(ndev->phydev);
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00002220
Claudiu Manoil08511332014-02-24 12:13:45 +02002221 enable_napi(priv);
2222
2223 netif_tx_wake_all_queues(ndev);
2224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226}
2227
Jan Ceuleers0977f812012-06-05 03:42:12 +00002228/* Called when something needs to use the ethernet device
2229 * Returns 0 for success.
2230 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231static int gfar_enet_open(struct net_device *dev)
2232{
Li Yang94e8cc32007-10-12 21:53:51 +08002233 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 int err;
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 err = init_phy(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002237 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 return err;
2239
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002240 err = gfar_request_irq(priv);
2241 if (err)
2242 return err;
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 err = startup_gfar(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002245 if (err)
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04002246 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
2248 return err;
2249}
2250
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002251static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002252{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002253 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002254
2255 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002256
Kumar Gala0bbaf062005-06-20 10:54:21 -05002257 return fcb;
2258}
2259
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002260static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002261 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002262{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002263	/* If we're here, it's an IP packet with a TCP or UDP
2264 * payload. We set it to checksum, using a pseudo-header
2265 * we provide
2266 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002267 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002268
Jan Ceuleers0977f812012-06-05 03:42:12 +00002269	/* Tell the controller what the protocol is,
2270	 * and provide the already calculated phcs
2271 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002272 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002273 flags |= TXFCB_UDP;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002274 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002275 } else
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002276 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002277
2278 /* l3os is the distance between the start of the
2279 * frame (skb->data) and the start of the IP hdr.
2280 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002281 * l3 hdr and the l4 hdr
2282 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002283 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002284 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002285
Andy Fleming7f7f5312005-11-11 12:38:59 -06002286 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002287}
2288
Arnd Bergmann278af572016-06-16 15:52:13 +02002289static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002290{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002291 fcb->flags |= TXFCB_VLN;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002292 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
Kumar Gala0bbaf062005-06-20 10:54:21 -05002293}
2294
Dai Haruki4669bc92008-12-17 16:51:04 -08002295static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002296 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002297{
2298 struct txbd8 *new_bd = bdp + stride;
2299
2300 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2301}
2302
2303static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002304 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002305{
2306 return skip_txbd(bdp, 1, base, ring_size);
2307}
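/* Illustrative sketch, not part of the driver: skip_txbd() above is ring
 * arithmetic in pointer form, valid for strides up to one full ring. The
 * equivalent index form makes the wrap-around explicit and handles any
 * stride.
 */
static unsigned int example_ring_advance(unsigned int idx,
					 unsigned int stride,
					 unsigned int ring_size)
{
	return (idx + stride) % ring_size;
}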
2308
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002309/* eTSEC12: csum generation not supported for some fcb offsets */
2310static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2311 unsigned long fcb_addr)
2312{
2313 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2314 (fcb_addr % 0x20) > 0x18);
2315}
2316
2317/* eTSEC76: csum generation for frames larger than 2500 may
2318 * cause excess delays before start of transmission
2319 */
2320static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2321 unsigned int len)
2322{
2323 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2324 (len > 2500));
2325}
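/* Illustrative sketch, not part of the driver: what the eTSEC12 test in
 * gfar_csum_errata_12() above means in practice. A FCB starting in the
 * last seven bytes of any 32-byte window (offsets 0x19..0x1f) trips the
 * erratum, and those frames must fall back to software checksumming.
 */
static bool example_fcb_offset_is_affected(unsigned long fcb_addr)
{
	return (fcb_addr % 0x20) > 0x18;	/* 0x19..0x1f hit it */
}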
2326
Jan Ceuleers0977f812012-06-05 03:42:12 +00002327/* This is called by the kernel when a frame is ready for transmission.
2328 * It is pointed to by the dev->hard_start_xmit function pointer
2329 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2331{
2332 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002333 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002334 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002335 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002336 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002337 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002338 u32 lstatus;
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002339 skb_frag_t *frag;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002340 int i, rq = 0;
2341 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002342 u32 bufaddr;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002343 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002344
2345 rq = skb->queue_mapping;
2346 tx_queue = priv->tx_queue[rq];
2347 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002348 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002349 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002350
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002351 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002352 do_vlan = skb_vlan_tag_present(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002353 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2354 priv->hwts_tx_en;
2355
2356 if (do_csum || do_vlan)
2357 fcb_len = GMAC_FCB_LEN;
2358
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002359 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002360 if (unlikely(do_tstamp))
2361 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002362
Li Yang5b28bea2009-03-27 15:54:30 -07002363 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002364 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002365 struct sk_buff *skb_new;
2366
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002367 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002368 if (!skb_new) {
2369 dev->stats.tx_errors++;
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002370 dev_kfree_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002371 return NETDEV_TX_OK;
2372 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002373
Eric Dumazet313b0372012-07-05 11:45:13 +00002374 if (skb->sk)
2375 skb_set_owner_w(skb_new, skb->sk);
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002376 dev_consume_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002377 skb = skb_new;
2378 }
2379
Dai Haruki4669bc92008-12-17 16:51:04 -08002380 /* total number of fragments in the SKB */
2381 nr_frags = skb_shinfo(skb)->nr_frags;
2382
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002383 /* calculate the required number of TxBDs for this skb */
2384 if (unlikely(do_tstamp))
2385 nr_txbds = nr_frags + 2;
2386 else
2387 nr_txbds = nr_frags + 1;
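	/* Worked example (illustrative): an skb with a linear head and
	 * 3 page fragments needs 3 + 1 = 4 TxBDs; with HW timestamping
	 * the head is split into an FCB descriptor plus a data
	 * descriptor, so 3 + 2 = 5 TxBDs.
	 */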
2388
Dai Haruki4669bc92008-12-17 16:51:04 -08002389 /* check if there is space to queue this packet */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002390 if (nr_txbds > tx_queue->num_txbdfree) {
Dai Haruki4669bc92008-12-17 16:51:04 -08002391 /* no space, stop the queue */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002392 netif_tx_stop_queue(txq);
Dai Haruki4669bc92008-12-17 16:51:04 -08002393 dev->stats.tx_fifo_errors++;
Dai Haruki4669bc92008-12-17 16:51:04 -08002394 return NETDEV_TX_BUSY;
2395 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
2397 /* Update transmit stats */
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002398 bytes_sent = skb->len;
2399 tx_queue->stats.tx_bytes += bytes_sent;
2400 /* keep Tx bytes on wire for BQL accounting */
2401 GFAR_CB(skb)->bytes_sent = bytes_sent;
Eric Dumazet1ac9ad12011-01-12 12:13:14 +00002402 tx_queue->stats.tx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002404 txbdp = txbdp_start = tx_queue->cur_tx;
Claudiu Manoila7312d52015-03-13 10:36:28 +02002405 lstatus = be32_to_cpu(txbdp->lstatus);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002406
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002407 /* Add TxPAL between FCB and frame if required */
2408 if (unlikely(do_tstamp)) {
2409 skb_push(skb, GMAC_TXPAL_LEN);
2410 memset(skb->data, 0, GMAC_TXPAL_LEN);
2411 }
2412
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002413 /* Add TxFCB if required */
2414 if (fcb_len) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002415 fcb = gfar_add_fcb(skb);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002416 lstatus |= BD_LFLAG(TXBD_TOE);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002417 }
2418
2419 /* Set up checksumming */
2420 if (do_csum) {
2421 gfar_tx_checksum(skb, fcb, fcb_len);
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002422
2423 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2424 unlikely(gfar_csum_errata_76(priv, skb->len))) {
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002425 __skb_pull(skb, GMAC_FCB_LEN);
2426 skb_checksum_help(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002427 if (do_vlan || do_tstamp) {
2428 /* put back a new fcb for vlan/tstamp TOE */
2429 fcb = gfar_add_fcb(skb);
2430 } else {
2431 /* Tx TOE not used */
2432 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2433 fcb = NULL;
2434 }
Alex Dubov4363c2fdd2011-03-16 17:57:13 +00002435 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002436 }
2437
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002438 if (do_vlan)
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002439 gfar_tx_vlan(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002440
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002441 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2442 DMA_TO_DEVICE);
2443 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2444 goto dma_map_err;
2445
Claudiu Manoila7312d52015-03-13 10:36:28 +02002446 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Claudiu Manoile19d0832016-02-23 11:48:37 +02002448 /* Time stamp insertion requires one additional TxBD */
2449 if (unlikely(do_tstamp))
2450 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2451 tx_queue->tx_ring_size);
2452
Claudiu Manoil48963b42016-02-23 11:48:39 +02002453 if (likely(!nr_frags)) {
Yangbo Lu9c8b0772016-06-02 17:36:28 +08002454 if (likely(!do_tstamp))
2455 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Claudiu Manoile19d0832016-02-23 11:48:37 +02002456 } else {
2457 u32 lstatus_start = lstatus;
2458
2459 /* Place the fragment addresses and lengths into the TxBDs */
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002460 frag = &skb_shinfo(skb)->frags[0];
2461 for (i = 0; i < nr_frags; i++, frag++) {
2462 unsigned int size;
2463
Claudiu Manoile19d0832016-02-23 11:48:37 +02002464 /* Point at the next BD, wrapping as needed */
2465 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2466
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002467 size = skb_frag_size(frag);
Claudiu Manoile19d0832016-02-23 11:48:37 +02002468
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002469 lstatus = be32_to_cpu(txbdp->lstatus) | size |
Claudiu Manoile19d0832016-02-23 11:48:37 +02002470 BD_LFLAG(TXBD_READY);
2471
2472 /* Handle the last BD specially */
2473 if (i == nr_frags - 1)
2474 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2475
Claudiu Manoil42f397a2016-02-23 11:48:38 +02002476 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
2477 size, DMA_TO_DEVICE);
Claudiu Manoile19d0832016-02-23 11:48:37 +02002478 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2479 goto dma_map_err;
2480
2481 /* set the TxBD length and buffer pointer */
2482 txbdp->bufPtr = cpu_to_be32(bufaddr);
2483 txbdp->lstatus = cpu_to_be32(lstatus);
2484 }
2485
2486 lstatus = lstatus_start;
2487 }
2488
Jan Ceuleers0977f812012-06-05 03:42:12 +00002489	/* If time stamping is requested, one additional TxBD must be set up. The
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002490 * first TxBD points to the FCB and must have a data length of
2491 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2492 * the full frame length.
2493 */
2494 if (unlikely(do_tstamp)) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002495 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2496
2497 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2498 bufaddr += fcb_len;
Claudiu Manoil48963b42016-02-23 11:48:39 +02002499
Claudiu Manoila7312d52015-03-13 10:36:28 +02002500 lstatus_ts |= BD_LFLAG(TXBD_READY) |
2501 (skb_headlen(skb) - fcb_len);
Claudiu Manoil48963b42016-02-23 11:48:39 +02002502 if (!nr_frags)
2503 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002504
2505 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2506 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002507 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
Claudiu Manoile19d0832016-02-23 11:48:37 +02002508
2509 /* Setup tx hardware time stamping */
2510 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2511 fcb->ptp = 1;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002512 } else {
2513 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2514 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002516 netdev_tx_sent_queue(txq, bytes_sent);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002517
Claudiu Manoild55398b2014-10-07 10:44:35 +03002518 gfar_wmb();
Andy Fleming7f7f5312005-11-11 12:38:59 -06002519
Claudiu Manoila7312d52015-03-13 10:36:28 +02002520 txbdp_start->lstatus = cpu_to_be32(lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002521
Claudiu Manoild55398b2014-10-07 10:44:35 +03002522 gfar_wmb(); /* force lstatus write before tx_skbuff */
Anton Vorontsov0eddba52010-03-03 08:18:58 +00002523
2524 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2525
Dai Haruki4669bc92008-12-17 16:51:04 -08002526 /* Update the current skb pointer to the next entry we will use
Jan Ceuleers0977f812012-06-05 03:42:12 +00002527 * (wrapping if necessary)
2528 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002529 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002530 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002531
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002532 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002533
Claudiu Manoilbc602282015-05-06 18:07:29 +03002534 /* We can work in parallel with gfar_clean_tx_ring(), except
2535 * when modifying num_txbdfree. Note that we didn't grab the lock
2536 * when we were reading the num_txbdfree and checking for available
2537 * space, that's because outside of this function it can only grow.
2538 */
2539 spin_lock_bh(&tx_queue->txlock);
Dai Haruki4669bc92008-12-17 16:51:04 -08002540 /* reduce TxBD free count */
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002541 tx_queue->num_txbdfree -= (nr_txbds);
Claudiu Manoilbc602282015-05-06 18:07:29 +03002542 spin_unlock_bh(&tx_queue->txlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
 2544	/* If the next BD still needs to be cleaned up, then the BDs
Jan Ceuleers0977f812012-06-05 03:42:12 +00002545	 * are full. We need to tell the kernel to stop sending us packets.
2546 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002547 if (!tx_queue->num_txbdfree) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002548 netif_tx_stop_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002550 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 }
2552
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 /* Tell the DMA to go go go */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002554 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002556 return NETDEV_TX_OK;
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002557
2558dma_map_err:
2559 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2560 if (do_tstamp)
2561 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2562 for (i = 0; i < nr_frags; i++) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002563 lstatus = be32_to_cpu(txbdp->lstatus);
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002564 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2565 break;
2566
Claudiu Manoila7312d52015-03-13 10:36:28 +02002567 lstatus &= ~BD_LFLAG(TXBD_READY);
2568 txbdp->lstatus = cpu_to_be32(lstatus);
2569 bufaddr = be32_to_cpu(txbdp->bufPtr);
2570 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002571 DMA_TO_DEVICE);
2572 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2573 }
2574 gfar_wmb();
2575 dev_kfree_skb_any(skb);
2576 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577}
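/* Illustrative summary (assumed reading of the code above): the xmit
 * path and gfar_clean_tx_ring() form a mostly lockless producer/
 * consumer pair.  Descriptor ownership is handed to hardware by the
 * final lstatus write with TXBD_READY set, ordered after the
 * bufPtr/tx_skbuff writes by gfar_wmb(); only the shared num_txbdfree
 * counter is protected by txlock, taken briefly on both sides.
 */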
2578
2579/* Stops the kernel queue, and halts the controller */
2580static int gfar_close(struct net_device *dev)
2581{
2582 struct gfar_private *priv = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002583
Sebastian Siewiorab939902008-08-19 21:12:45 +02002584 cancel_work_sync(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 stop_gfar(dev);
2586
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002587 /* Disconnect from the PHY */
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02002588 phy_disconnect(dev->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002590 gfar_free_irq(priv);
2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 return 0;
2593}
2594
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595/* Changes the mac address if the controller is not running. */
Andy Flemingf162b9d2008-05-02 13:00:30 -05002596static int gfar_set_mac_address(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002598 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599
2600 return 0;
2601}
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2604{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606
Claudiu Manoil08511332014-02-24 12:13:45 +02002607 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2608 cpu_relax();
2609
Claudiu Manoil88302642014-02-24 12:13:43 +02002610 if (dev->flags & IFF_UP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 stop_gfar(dev);
2612
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 dev->mtu = new_mtu;
2614
Claudiu Manoil88302642014-02-24 12:13:43 +02002615 if (dev->flags & IFF_UP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 startup_gfar(dev);
2617
Claudiu Manoil08511332014-02-24 12:13:45 +02002618 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2619
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 return 0;
2621}
2622
Claudiu Manoil08511332014-02-24 12:13:45 +02002623void reset_gfar(struct net_device *ndev)
2624{
2625 struct gfar_private *priv = netdev_priv(ndev);
2626
2627 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2628 cpu_relax();
2629
2630 stop_gfar(ndev);
2631 startup_gfar(ndev);
2632
2633 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2634}
2635
Sebastian Siewiorab939902008-08-19 21:12:45 +02002636/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 * transmitted after a set amount of time.
 2638 * For now, assume that clearing out all the structures and
Sebastian Siewiorab939902008-08-19 21:12:45 +02002639 * starting over will fix the problem.
2640 */
2641static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642{
Sebastian Siewiorab939902008-08-19 21:12:45 +02002643 struct gfar_private *priv = container_of(work, struct gfar_private,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002644 reset_task);
Claudiu Manoil08511332014-02-24 12:13:45 +02002645 reset_gfar(priv->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646}
2647
Sebastian Siewiorab939902008-08-19 21:12:45 +02002648static void gfar_timeout(struct net_device *dev)
2649{
2650 struct gfar_private *priv = netdev_priv(dev);
2651
2652 dev->stats.tx_errors++;
2653 schedule_work(&priv->reset_task);
2654}
2655
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656/* Interrupt Handler for Transmit complete */
Claudiu Manoilc233cf402013-03-19 07:40:02 +00002657static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658{
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002659 struct net_device *dev = tx_queue->dev;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002660 struct netdev_queue *txq;
Dai Harukid080cd62008-04-09 19:37:51 -05002661 struct gfar_private *priv = netdev_priv(dev);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002662 struct txbd8 *bdp, *next = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002663 struct txbd8 *lbdp = NULL;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002664 struct txbd8 *base = tx_queue->tx_bd_base;
Dai Haruki4669bc92008-12-17 16:51:04 -08002665 struct sk_buff *skb;
2666 int skb_dirtytx;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002667 int tx_ring_size = tx_queue->tx_ring_size;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002668 int frags = 0, nr_txbds = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002669 int i;
Dai Harukid080cd62008-04-09 19:37:51 -05002670 int howmany = 0;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002671 int tqi = tx_queue->qindex;
2672 unsigned int bytes_sent = 0;
Dai Haruki4669bc92008-12-17 16:51:04 -08002673 u32 lstatus;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002674 size_t buflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002676 txq = netdev_get_tx_queue(dev, tqi);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002677 bdp = tx_queue->dirty_tx;
2678 skb_dirtytx = tx_queue->skb_dirtytx;
Dai Haruki4669bc92008-12-17 16:51:04 -08002679
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002680 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
Anton Vorontsova3bc1f12009-11-10 14:11:10 +00002681
Dai Haruki4669bc92008-12-17 16:51:04 -08002682 frags = skb_shinfo(skb)->nr_frags;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002683
Jan Ceuleers0977f812012-06-05 03:42:12 +00002684 /* When time stamping, one additional TxBD must be freed.
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002685 * Also, we need to dma_unmap_single() the TxPAL.
2686 */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002687 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002688 nr_txbds = frags + 2;
2689 else
2690 nr_txbds = frags + 1;
2691
2692 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002693
Claudiu Manoila7312d52015-03-13 10:36:28 +02002694 lstatus = be32_to_cpu(lbdp->lstatus);
Dai Haruki4669bc92008-12-17 16:51:04 -08002695
2696 /* Only clean completed frames */
2697 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002698 (lstatus & BD_LENGTH_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 break;
2700
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002701 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002702 next = next_txbd(bdp, base, tx_ring_size);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002703 buflen = be16_to_cpu(next->length) +
2704 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002705 } else
Claudiu Manoila7312d52015-03-13 10:36:28 +02002706 buflen = be16_to_cpu(bdp->length);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002707
Claudiu Manoila7312d52015-03-13 10:36:28 +02002708 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002709 buflen, DMA_TO_DEVICE);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002710
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002711 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002712 struct skb_shared_hwtstamps shhwtstamps;
Scott Woodb4b67f22015-07-29 16:13:06 +03002713 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2714 ~0x7UL);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002715
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002716 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
Yangbo Luf54af122016-02-24 17:26:56 +08002717 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002718 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002719 skb_tstamp_tx(skb, &shhwtstamps);
Claudiu Manoila7312d52015-03-13 10:36:28 +02002720 gfar_clear_txbd_status(bdp);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002721 bdp = next;
2722 }
Dai Haruki4669bc92008-12-17 16:51:04 -08002723
Claudiu Manoila7312d52015-03-13 10:36:28 +02002724 gfar_clear_txbd_status(bdp);
Dai Haruki4669bc92008-12-17 16:51:04 -08002725 bdp = next_txbd(bdp, base, tx_ring_size);
2726
2727 for (i = 0; i < frags; i++) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02002728 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2729 be16_to_cpu(bdp->length),
2730 DMA_TO_DEVICE);
2731 gfar_clear_txbd_status(bdp);
Dai Haruki4669bc92008-12-17 16:51:04 -08002732 bdp = next_txbd(bdp, base, tx_ring_size);
2733 }
2734
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002735 bytes_sent += GFAR_CB(skb)->bytes_sent;
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002736
Eric Dumazetacb600d2012-10-05 06:23:55 +00002737 dev_kfree_skb_any(skb);
Andy Fleming0fd56bb2009-02-04 16:43:16 -08002738
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002739 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
Dai Haruki4669bc92008-12-17 16:51:04 -08002740
2741 skb_dirtytx = (skb_dirtytx + 1) &
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002742 TX_RING_MOD_MASK(tx_ring_size);
Dai Haruki4669bc92008-12-17 16:51:04 -08002743
Dai Harukid080cd62008-04-09 19:37:51 -05002744 howmany++;
Claudiu Manoilbc602282015-05-06 18:07:29 +03002745 spin_lock(&tx_queue->txlock);
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002746 tx_queue->num_txbdfree += nr_txbds;
Claudiu Manoilbc602282015-05-06 18:07:29 +03002747 spin_unlock(&tx_queue->txlock);
Dai Haruki4669bc92008-12-17 16:51:04 -08002748 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Dai Haruki4669bc92008-12-17 16:51:04 -08002750 /* If we freed a buffer, we can restart transmission, if necessary */
Claudiu Manoil08511332014-02-24 12:13:45 +02002751 if (tx_queue->num_txbdfree &&
2752 netif_tx_queue_stopped(txq) &&
2753 !(test_bit(GFAR_DOWN, &priv->state)))
2754 netif_wake_subqueue(priv->ndev, tqi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
Dai Haruki4669bc92008-12-17 16:51:04 -08002756 /* Update dirty indicators */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002757 tx_queue->skb_dirtytx = skb_dirtytx;
2758 tx_queue->dirty_tx = bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05002760 netdev_tx_completed_queue(txq, howmany, bytes_sent);
Dai Harukid080cd62008-04-09 19:37:51 -05002761}
2762
Claudiu Manoil75354142015-07-13 16:22:06 +03002763static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
Eran Libertyacbc0f02010-07-07 15:54:54 -07002764{
Claudiu Manoil75354142015-07-13 16:22:06 +03002765 struct page *page;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002766 dma_addr_t addr;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002767
Claudiu Manoil75354142015-07-13 16:22:06 +03002768 page = dev_alloc_page();
2769 if (unlikely(!page))
2770 return false;
Eran Libertyacbc0f02010-07-07 15:54:54 -07002771
Claudiu Manoil75354142015-07-13 16:22:06 +03002772 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
2773 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
2774 __free_page(page);
Eran Libertyacbc0f02010-07-07 15:54:54 -07002775
Claudiu Manoil75354142015-07-13 16:22:06 +03002776 return false;
Kevin Hao0a4b5a22014-12-11 14:08:41 +08002777 }
2778
Claudiu Manoil75354142015-07-13 16:22:06 +03002779 rxb->dma = addr;
2780 rxb->page = page;
2781 rxb->page_offset = 0;
2782
2783 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784}
2785
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002786static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
2787{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002788 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002789 struct gfar_extra_stats *estats = &priv->extra_stats;
2790
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002791 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002792 atomic64_inc(&estats->rx_alloc_err);
2793}
2794
2795static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
2796 int alloc_cnt)
2797{
Claudiu Manoil75354142015-07-13 16:22:06 +03002798 struct rxbd8 *bdp;
2799 struct gfar_rx_buff *rxb;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002800 int i;
2801
2802 i = rx_queue->next_to_use;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002803 bdp = &rx_queue->rx_bd_base[i];
Claudiu Manoil75354142015-07-13 16:22:06 +03002804 rxb = &rx_queue->rx_buff[i];
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002805
2806 while (alloc_cnt--) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002807		/* try to reuse the page */
2808 if (unlikely(!rxb->page)) {
2809 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002810 gfar_rx_alloc_err(rx_queue);
2811 break;
2812 }
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002813 }
2814
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002815 /* Setup the new RxBD */
Claudiu Manoil75354142015-07-13 16:22:06 +03002816 gfar_init_rxbdp(rx_queue, bdp,
2817 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002818
2819 /* Update to the next pointer */
Claudiu Manoil75354142015-07-13 16:22:06 +03002820 bdp++;
2821 rxb++;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002822
Claudiu Manoil75354142015-07-13 16:22:06 +03002823 if (unlikely(++i == rx_queue->rx_ring_size)) {
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002824 i = 0;
Claudiu Manoil75354142015-07-13 16:22:06 +03002825 bdp = rx_queue->rx_bd_base;
2826 rxb = rx_queue->rx_buff;
2827 }
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002828 }
2829
2830 rx_queue->next_to_use = i;
Claudiu Manoil75354142015-07-13 16:22:06 +03002831 rx_queue->next_to_alloc = i;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03002832}
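/* Illustrative summary (assumed from the code above and gianfar.h):
 * the RX ring keeps three cursors -- next_to_clean (oldest BD the
 * CPU still has to process), next_to_use (next BD to refill), and
 * next_to_alloc (next rx_buff slot that may take a recycled page).
 * gfar_rxbd_unused() yields how many BDs may be refilled without
 * touching descriptors the hardware has filled but the CPU has not
 * yet consumed.
 */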
2833
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002834static void count_errors(u32 lstatus, struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03002836 struct gfar_private *priv = netdev_priv(ndev);
2837 struct net_device_stats *stats = &ndev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 struct gfar_extra_stats *estats = &priv->extra_stats;
2839
Jan Ceuleers0977f812012-06-05 03:42:12 +00002840 /* If the packet was truncated, none of the other errors matter */
Claudiu Manoilf9660822015-07-13 16:22:04 +03002841 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 stats->rx_length_errors++;
2843
Paul Gortmaker212079d2013-02-12 15:38:19 -05002844 atomic64_inc(&estats->rx_trunc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
2846 return;
2847 }
2848 /* Count the errors, if there were any */
Claudiu Manoilf9660822015-07-13 16:22:04 +03002849 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 stats->rx_length_errors++;
2851
Claudiu Manoilf9660822015-07-13 16:22:04 +03002852 if (lstatus & BD_LFLAG(RXBD_LARGE))
Paul Gortmaker212079d2013-02-12 15:38:19 -05002853 atomic64_inc(&estats->rx_large);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 else
Paul Gortmaker212079d2013-02-12 15:38:19 -05002855 atomic64_inc(&estats->rx_short);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002857 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 stats->rx_frame_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05002859 atomic64_inc(&estats->rx_nonoctet);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002861 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002862 atomic64_inc(&estats->rx_crcerr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 stats->rx_crc_errors++;
2864 }
Claudiu Manoilf9660822015-07-13 16:22:04 +03002865 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05002866 atomic64_inc(&estats->rx_overrun);
Claudiu Manoilf9660822015-07-13 16:22:04 +03002867 stats->rx_over_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 }
2869}
2870
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002871irqreturn_t gfar_receive(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872{
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02002873 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2874 unsigned long flags;
Claudiu Manoil3e905b82015-10-05 17:19:59 +03002875 u32 imask, ievent;
2876
2877 ievent = gfar_read(&grp->regs->ievent);
2878
2879 if (unlikely(ievent & IEVENT_FGPI)) {
2880 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2881 return IRQ_HANDLED;
2882 }
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02002883
2884 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2885 spin_lock_irqsave(&grp->grplock, flags);
2886 imask = gfar_read(&grp->regs->imask);
2887 imask &= IMASK_RX_DISABLED;
2888 gfar_write(&grp->regs->imask, imask);
2889 spin_unlock_irqrestore(&grp->grplock, flags);
2890 __napi_schedule(&grp->napi_rx);
2891 } else {
2892 /* Clear IEVENT, so interrupts aren't called again
2893 * because of the packets that have already arrived.
2894 */
2895 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2896 }
2897
2898 return IRQ_HANDLED;
2899}
2900
2901/* Interrupt Handler for Transmit complete */
2902static irqreturn_t gfar_transmit(int irq, void *grp_id)
2903{
2904 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2905 unsigned long flags;
2906 u32 imask;
2907
2908 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2909 spin_lock_irqsave(&grp->grplock, flags);
2910 imask = gfar_read(&grp->regs->imask);
2911 imask &= IMASK_TX_DISABLED;
2912 gfar_write(&grp->regs->imask, imask);
2913 spin_unlock_irqrestore(&grp->grplock, flags);
2914 __napi_schedule(&grp->napi_tx);
2915 } else {
2916 /* Clear IEVENT, so interrupts aren't called again
2917 * because of the packets that have already arrived.
2918 */
2919 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2920 }
2921
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 return IRQ_HANDLED;
2923}
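/* Illustrative sketch of the IRQ -> NAPI handoff implemented by
 * gfar_receive()/gfar_transmit() above:
 *
 *	if (napi_schedule_prep(napi)) {
 *		mask the RX (or TX) sources in IMASK under grplock;
 *		__napi_schedule(napi);
 *	} else {
 *		ack IEVENT;	(a poll is already pending)
 *	}
 *
 * The matching gfar_poll_*() routines below re-enable the IMASK bits
 * once the rings are drained.
 */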
2924
Claudiu Manoil75354142015-07-13 16:22:06 +03002925static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2926 struct sk_buff *skb, bool first)
2927{
2928 unsigned int size = lstatus & BD_LENGTH_MASK;
2929 struct page *page = rxb->page;
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002930 bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
Claudiu Manoil75354142015-07-13 16:22:06 +03002931
2932 /* Remove the FCS from the packet length */
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002933 if (last)
Claudiu Manoil75354142015-07-13 16:22:06 +03002934 size -= ETH_FCS_LEN;
2935
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002936 if (likely(first)) {
Claudiu Manoil75354142015-07-13 16:22:06 +03002937 skb_put(skb, size);
Zefir Kurtisi6c389fc2016-08-22 15:58:12 +02002938 } else {
 2939		/* the last fragment's length contains the full frame length */
2940 if (last)
2941 size -= skb->len;
2942
2943 /* in case the last fragment consisted only of the FCS */
2944 if (size > 0)
2945 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2946 rxb->page_offset + RXBUF_ALIGNMENT,
2947 size, GFAR_RXB_TRUESIZE);
2948 }
Claudiu Manoil75354142015-07-13 16:22:06 +03002949
 2950	/* try to reuse the page */
Eric Dumazet69fed992017-01-18 19:44:42 -08002951 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
Claudiu Manoil75354142015-07-13 16:22:06 +03002952 return false;
2953
2954 /* change offset to the other half */
2955 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2956
Joonsoo Kimfe896d12016-03-17 14:19:26 -07002957 page_ref_inc(page);
Claudiu Manoil75354142015-07-13 16:22:06 +03002958
2959 return true;
2960}
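/* Illustrative layout of the half-page recycling scheme above
 * (assuming 4K pages, with GFAR_RXB_TRUESIZE == 2048 per gianfar.h):
 *
 *	page offset 0x000: buffer half A  (rxb->page_offset == 0)
 *	page offset 0x800: buffer half B  (rxb->page_offset == 0x800)
 *
 * XOR-ing page_offset with GFAR_RXB_TRUESIZE flips between the two
 * halves, and the page_count(page) == 1 test above proves the stack
 * no longer references the other half, so it is safe to reuse.
 */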
2961
2962static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2963 struct gfar_rx_buff *old_rxb)
2964{
2965 struct gfar_rx_buff *new_rxb;
2966 u16 nta = rxq->next_to_alloc;
2967
2968 new_rxb = &rxq->rx_buff[nta];
2969
2970 /* find next buf that can reuse a page */
2971 nta++;
2972 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2973
2974 /* copy page reference */
2975 *new_rxb = *old_rxb;
2976
2977 /* sync for use by the device */
2978 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2979 old_rxb->page_offset,
2980 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2981}
2982
2983static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2984 u32 lstatus, struct sk_buff *skb)
2985{
2986 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2987 struct page *page = rxb->page;
2988 bool first = false;
2989
2990 if (likely(!skb)) {
2991 void *buff_addr = page_address(page) + rxb->page_offset;
2992
2993 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2994 if (unlikely(!skb)) {
2995 gfar_rx_alloc_err(rx_queue);
2996 return NULL;
2997 }
2998 skb_reserve(skb, RXBUF_ALIGNMENT);
2999 first = true;
3000 }
3001
3002 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
3003 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
3004
3005 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
3006 /* reuse the free half of the page */
3007 gfar_reuse_rx_page(rx_queue, rxb);
3008 } else {
3009 /* page cannot be reused, unmap it */
3010 dma_unmap_page(rx_queue->dev, rxb->dma,
3011 PAGE_SIZE, DMA_FROM_DEVICE);
3012 }
3013
3014 /* clear rxb content */
3015 rxb->page = NULL;
3016
3017 return skb;
3018}
3019
Kumar Gala0bbaf062005-06-20 10:54:21 -05003020static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
3021{
3022 /* If valid headers were found, and valid sums
3023 * were verified, then we tell the kernel that no
Jan Ceuleers0977f812012-06-05 03:42:12 +00003024 * checksumming is necessary. Otherwise, it is [FIXME]
3025 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02003026 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
3027 (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05003028 skb->ip_summed = CHECKSUM_UNNECESSARY;
3029 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003030 skb_checksum_none_assert(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003031}
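/* Illustrative note (assumed reading of the eTSEC RxFCB layout):
 * RXFCB_CSUM_MASK covers both the "checksum performed" bits
 * (RXFCB_CIP, RXFCB_CTU) and the corresponding error bits, so the
 * equality test above accepts a frame only when both the IP and
 * TCP/UDP sums were computed and neither was flagged bad.
 */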
3032
Jan Ceuleers0977f812012-06-05 03:42:12 +00003033/* gfar_process_frame() -- handle one received frame (the caller ensures skb is non-NULL) */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003034static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003036 struct gfar_private *priv = netdev_priv(ndev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003037 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038
Dai Haruki2c2db482008-12-16 15:31:15 -08003039 /* fcb is at the beginning if exists */
3040 fcb = (struct rxfcb *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041
Jan Ceuleers0977f812012-06-05 03:42:12 +00003042 /* Remove the FCB from the skb
3043 * Remove the padded bytes, if there are any
3044 */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003045 if (priv->uses_rxfcb)
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003046 skb_pull(skb, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003047
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003048 /* Get receive timestamp from the skb */
3049 if (priv->hwts_rx_en) {
3050 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
3051 u64 *ns = (u64 *) skb->data;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003052
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003053 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
Yangbo Luf54af122016-02-24 17:26:56 +08003054 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00003055 }
3056
3057 if (priv->padding)
3058 skb_pull(skb, priv->padding);
3059
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003060 if (ndev->features & NETIF_F_RXCSUM)
Dai Haruki2c2db482008-12-16 15:31:15 -08003061 gfar_rx_checksum(skb, fcb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003062
Dai Haruki2c2db482008-12-16 15:31:15 -08003063 /* Tell the skb what kind of packet this is */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003064 skb->protocol = eth_type_trans(skb, ndev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003065
Patrick McHardyf6469682013-04-19 02:04:27 +00003066	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
David S. Miller823dcd22011-08-20 10:39:12 -07003067	 * Even if vlan rx accel is disabled, on some chips
 3068	 * RXFCB_VLN is pseudo-randomly set.
3069 */
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003070 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
Claudiu Manoil26eb9372015-03-13 10:36:29 +02003071 be16_to_cpu(fcb->flags) & RXFCB_VLN)
3072 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3073 be16_to_cpu(fcb->vlctl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074}
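/* Illustrative RX buffer layout handled above (assuming RxFCB and HW
 * RX timestamping are both enabled):
 *
 *	skb->data: [ RxFCB (8) ][ timestamp (8) ][ pad ][ frame ]
 *
 * The FCB is pulled first, the big-endian nanosecond stamp is read
 * from the head, then priv->padding (which covers the timestamp
 * bytes) is pulled before eth_type_trans() sees the frame.
 */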
3075
3076/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Jan Ceuleers2281a0f2012-06-05 03:42:11 +00003077 * until the budget/quota has been reached. Returns the number
3078 * of frames handled
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003080int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081{
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003082 struct net_device *ndev = rx_queue->ndev;
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003083 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil75354142015-07-13 16:22:06 +03003084 struct rxbd8 *bdp;
3085 int i, howmany = 0;
3086 struct sk_buff *skb = rx_queue->skb;
3087 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
3088 unsigned int total_bytes = 0, total_pkts = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089
3090 /* Get the first full descriptor */
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003091 i = rx_queue->next_to_clean;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003093 while (rx_work_limit--) {
Claudiu Manoilf9660822015-07-13 16:22:04 +03003094 u32 lstatus;
Dai Haruki2c2db482008-12-16 15:31:15 -08003095
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003096 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
3097 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3098 cleaned_cnt = 0;
3099 }
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003100
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003101 bdp = &rx_queue->rx_bd_base[i];
Claudiu Manoilf9660822015-07-13 16:22:04 +03003102 lstatus = be32_to_cpu(bdp->lstatus);
3103 if (lstatus & BD_LFLAG(RXBD_EMPTY))
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003104 break;
3105
3106 /* order rx buffer descriptor reads */
Scott Wood3b6330c2007-05-16 15:06:59 -05003107 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05003108
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003109 /* fetch next to clean buffer from the ring */
Claudiu Manoil75354142015-07-13 16:22:06 +03003110 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
3111 if (unlikely(!skb))
3112 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113
Claudiu Manoil75354142015-07-13 16:22:06 +03003114 cleaned_cnt++;
3115 howmany++;
Andy Fleming81183052008-11-12 10:07:11 -06003116
Claudiu Manoil75354142015-07-13 16:22:06 +03003117 if (unlikely(++i == rx_queue->rx_ring_size))
3118 i = 0;
Anton Vorontsov63b88b92010-06-11 10:51:03 +00003119
Claudiu Manoil75354142015-07-13 16:22:06 +03003120 rx_queue->next_to_clean = i;
3121
3122 /* fetch next buffer if not the last in frame */
3123 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
3124 continue;
3125
3126 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
Claudiu Manoilf23223f2015-07-13 16:22:05 +03003127 count_errors(lstatus, ndev);
Andy Fleming815b97c2008-04-22 17:18:29 -05003128
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003129 /* discard faulty buffer */
3130 dev_kfree_skb(skb);
Claudiu Manoil75354142015-07-13 16:22:06 +03003131 skb = NULL;
3132 rx_queue->stats.rx_dropped++;
3133 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134 }
3135
Claudiu Manoil75354142015-07-13 16:22:06 +03003136 /* Increment the number of packets */
3137 total_pkts++;
3138 total_bytes += skb->len;
3139
3140 skb_record_rx_queue(skb, rx_queue->qindex);
3141
3142 gfar_process_frame(ndev, skb);
3143
3144 /* Send the packet up the stack */
3145 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
3146
3147 skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 }
3149
Claudiu Manoil75354142015-07-13 16:22:06 +03003150 /* Store incomplete frames for completion */
3151 rx_queue->skb = skb;
3152
3153 rx_queue->stats.rx_packets += total_pkts;
3154 rx_queue->stats.rx_bytes += total_bytes;
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003155
3156 if (cleaned_cnt)
3157 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
3158
3159 /* Update Last Free RxBD pointer for LFC */
3160 if (unlikely(priv->tx_actual_en)) {
Scott Woodb4b67f22015-07-29 16:13:06 +03003161 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3162
3163 gfar_write(rx_queue->rfbptr, bdp_dma);
Claudiu Manoil76f31e82015-07-13 16:22:03 +03003164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 return howmany;
3167}
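/* Note (assumed purpose): with lossless flow control (LFC) enabled,
 * writing the DMA address of the last free RxBD to rfbptr lets the
 * controller gauge the remaining ring space and send PAUSE frames
 * before the ring overruns, instead of silently dropping frames.
 */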
3168
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003169static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003170{
3171 struct gfar_priv_grp *gfargrp =
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003172 container_of(napi, struct gfar_priv_grp, napi_rx);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003173 struct gfar __iomem *regs = gfargrp->regs;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02003174 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003175 int work_done = 0;
3176
3177 /* Clear IEVENT, so interrupts aren't called again
3178 * because of the packets that have already arrived
3179 */
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003180 gfar_write(&regs->ievent, IEVENT_RX_MASK);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003181
3182 work_done = gfar_clean_rx_ring(rx_queue, budget);
3183
3184 if (work_done < budget) {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003185 u32 imask;
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003186 napi_complete(napi);
3187 /* Clear the halt bit in RSTAT */
3188 gfar_write(&regs->rstat, gfargrp->rstat);
3189
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003190 spin_lock_irq(&gfargrp->grplock);
3191 imask = gfar_read(&regs->imask);
3192 imask |= IMASK_RX_DEFAULT;
3193 gfar_write(&regs->imask, imask);
3194 spin_unlock_irq(&gfargrp->grplock);
Claudiu Manoil5eaedf32013-06-10 20:19:48 +03003195 }
3196
3197 return work_done;
3198}
3199
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003200static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201{
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003202 struct gfar_priv_grp *gfargrp =
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003203 container_of(napi, struct gfar_priv_grp, napi_tx);
3204 struct gfar __iomem *regs = gfargrp->regs;
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02003205 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003206 u32 imask;
3207
3208 /* Clear IEVENT, so interrupts aren't called again
3209 * because of the packets that have already arrived
3210 */
3211 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3212
3213 /* run Tx cleanup to completion */
3214 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
3215 gfar_clean_tx_ring(tx_queue);
3216
3217 napi_complete(napi);
3218
3219 spin_lock_irq(&gfargrp->grplock);
3220 imask = gfar_read(&regs->imask);
3221 imask |= IMASK_TX_DEFAULT;
3222 gfar_write(&regs->imask, imask);
3223 spin_unlock_irq(&gfargrp->grplock);
3224
3225 return 0;
3226}
3227
3228static int gfar_poll_rx(struct napi_struct *napi, int budget)
3229{
3230 struct gfar_priv_grp *gfargrp =
3231 container_of(napi, struct gfar_priv_grp, napi_rx);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003232 struct gfar_private *priv = gfargrp->priv;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003233 struct gfar __iomem *regs = gfargrp->regs;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003234 struct gfar_priv_rx_q *rx_queue = NULL;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003235 int work_done = 0, work_done_per_q = 0;
Claudiu Manoil39c0a0d2013-03-21 03:12:13 +00003236 int i, budget_per_q = 0;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003237 unsigned long rstat_rxf;
3238 int num_act_queues;
Dai Harukid080cd62008-04-09 19:37:51 -05003239
Dai Haruki8c7396a2008-12-17 16:52:00 -08003240 /* Clear IEVENT, so interrupts aren't called again
Jan Ceuleers0977f812012-06-05 03:42:12 +00003241 * because of the packets that have already arrived
3242 */
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003243 gfar_write(&regs->ievent, IEVENT_RX_MASK);
Dai Haruki8c7396a2008-12-17 16:52:00 -08003244
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003245 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3246
3247 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3248 if (num_act_queues)
3249 budget_per_q = budget/num_act_queues;
3250
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003251 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3252 /* skip queue if not active */
3253 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3254 continue;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003255
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003256 rx_queue = priv->rx_queue[i];
3257 work_done_per_q =
3258 gfar_clean_rx_ring(rx_queue, budget_per_q);
3259 work_done += work_done_per_q;
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003260
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003261 /* finished processing this queue */
3262 if (work_done_per_q < budget_per_q) {
3263 /* clear active queue hw indication */
3264 gfar_write(&regs->rstat,
3265 RSTAT_CLEAR_RXF0 >> i);
3266 num_act_queues--;
Claudiu Manoil6be5ed32013-03-19 07:40:03 +00003267
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003268 if (!num_act_queues)
3269 break;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00003270 }
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003271 }
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003272
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003273 if (!num_act_queues) {
3274 u32 imask;
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003275 napi_complete(napi);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003276
Claudiu Manoil3ba405d2013-10-14 17:05:09 +03003277 /* Clear the halt bit in RSTAT */
3278 gfar_write(&regs->rstat, gfargrp->rstat);
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003279
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003280 spin_lock_irq(&gfargrp->grplock);
3281 imask = gfar_read(&regs->imask);
3282 imask |= IMASK_RX_DEFAULT;
3283 gfar_write(&regs->imask, imask);
3284 spin_unlock_irq(&gfargrp->grplock);
Dai Harukid080cd62008-04-09 19:37:51 -05003285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286
Claudiu Manoilc233cf402013-03-19 07:40:02 +00003287 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288}
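/* Worked example (illustrative): with the usual NAPI budget of 64
 * and RSTAT reporting two active RX queues, budget_per_q = 64 / 2 =
 * 32; a queue that consumes fewer than its 32-frame share has its
 * RSTAT_CLEAR_RXF bit written back, so it stops counting as active.
 */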
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02003290static int gfar_poll_tx(struct napi_struct *napi, int budget)
3291{
3292 struct gfar_priv_grp *gfargrp =
3293 container_of(napi, struct gfar_priv_grp, napi_tx);
3294 struct gfar_private *priv = gfargrp->priv;
3295 struct gfar __iomem *regs = gfargrp->regs;
3296 struct gfar_priv_tx_q *tx_queue = NULL;
3297 int has_tx_work = 0;
3298 int i;
3299
3300 /* Clear IEVENT, so interrupts aren't called again
3301 * because of the packets that have already arrived
3302 */
3303 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3304
3305 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3306 tx_queue = priv->tx_queue[i];
3307 /* run Tx cleanup to completion */
3308 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3309 gfar_clean_tx_ring(tx_queue);
3310 has_tx_work = 1;
3311 }
3312 }
3313
3314 if (!has_tx_work) {
3315 u32 imask;
3316 napi_complete(napi);
3317
3318 spin_lock_irq(&gfargrp->grplock);
3319 imask = gfar_read(&regs->imask);
3320 imask |= IMASK_TX_DEFAULT;
3321 gfar_write(&regs->imask, imask);
3322 spin_unlock_irq(&gfargrp->grplock);
3323 }
3324
3325 return 0;
3326}
3327
3328
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003329#ifdef CONFIG_NET_POLL_CONTROLLER
Jan Ceuleers0977f812012-06-05 03:42:12 +00003330/* Polling 'interrupt' - used by things like netconsole to send skbs
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003331 * without having to re-enable interrupts. It's not called while
3332 * the interrupt routine is executing.
3333 */
3334static void gfar_netpoll(struct net_device *dev)
3335{
3336 struct gfar_private *priv = netdev_priv(dev);
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00003337 int i;
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003338
3339 /* If the device has multiple interrupts, run tx/rx */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003340 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003341 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003342 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3343
3344 disable_irq(gfar_irq(grp, TX)->irq);
3345 disable_irq(gfar_irq(grp, RX)->irq);
3346 disable_irq(gfar_irq(grp, ER)->irq);
3347 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3348 enable_irq(gfar_irq(grp, ER)->irq);
3349 enable_irq(gfar_irq(grp, RX)->irq);
3350 enable_irq(gfar_irq(grp, TX)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003351 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003352 } else {
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003353 for (i = 0; i < priv->num_grps; i++) {
Paul Gortmaker62ed8392013-02-24 05:38:31 +00003354 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3355
3356 disable_irq(gfar_irq(grp, TX)->irq);
3357 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3358 enable_irq(gfar_irq(grp, TX)->irq);
Anton Vorontsov43de0042009-12-09 02:52:19 -08003359 }
Vitaly Woolf2d71c22006-11-07 13:27:02 +03003360 }
3361}
3362#endif
3363
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364/* The interrupt handler for devices with one interrupt */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003365static irqreturn_t gfar_interrupt(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003367 struct gfar_priv_grp *gfargrp = grp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368
3369 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003370 u32 events = gfar_read(&gfargrp->regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003373 if (events & IEVENT_RX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003374 gfar_receive(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375
3376 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003377 if (events & IEVENT_TX_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003378 gfar_transmit(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04003380 /* Check for errors */
3381 if (events & IEVENT_ERR_MASK)
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003382 gfar_error(irq, grp_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
3384 return IRQ_HANDLED;
3385}
3386
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387/* Called every time the controller might need to be made
3388 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003389 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390 * function converts those variables into the appropriate
3391 * register values, and can bring down the device if needed.
3392 */
3393static void adjust_link(struct net_device *dev)
3394{
3395 struct gfar_private *priv = netdev_priv(dev);
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003396 struct phy_device *phydev = dev->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003398 if (unlikely(phydev->link != priv->oldlink ||
Guenter Roeck0ae93b22015-03-02 12:03:27 -08003399 (phydev->link && (phydev->duplex != priv->oldduplex ||
3400 phydev->speed != priv->oldspeed))))
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003401 gfar_update_link_state(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04003402}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403
3404/* Update the hash table based on the current list of multicast
3405 * addresses we subscribe to. Also, change the promiscuity of
3406 * the device based on the flags (this function is called
Jan Ceuleers0977f812012-06-05 03:42:12 +00003407 * whenever dev->flags is changed)
3408 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409static void gfar_set_multi(struct net_device *dev)
3410{
Jiri Pirko22bedad32010-04-01 21:22:57 +00003411 struct netdev_hw_addr *ha;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003413 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 u32 tempval;
3415
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003416 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 /* Set RCTRL to PROM */
3418 tempval = gfar_read(&regs->rctrl);
3419 tempval |= RCTRL_PROM;
3420 gfar_write(&regs->rctrl, tempval);
3421 } else {
3422 /* Set RCTRL to not PROM */
3423 tempval = gfar_read(&regs->rctrl);
3424 tempval &= ~(RCTRL_PROM);
3425 gfar_write(&regs->rctrl, tempval);
3426 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003427
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00003428 if (dev->flags & IFF_ALLMULTI) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003430 gfar_write(&regs->igaddr0, 0xffffffff);
3431 gfar_write(&regs->igaddr1, 0xffffffff);
3432 gfar_write(&regs->igaddr2, 0xffffffff);
3433 gfar_write(&regs->igaddr3, 0xffffffff);
3434 gfar_write(&regs->igaddr4, 0xffffffff);
3435 gfar_write(&regs->igaddr5, 0xffffffff);
3436 gfar_write(&regs->igaddr6, 0xffffffff);
3437 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438 gfar_write(&regs->gaddr0, 0xffffffff);
3439 gfar_write(&regs->gaddr1, 0xffffffff);
3440 gfar_write(&regs->gaddr2, 0xffffffff);
3441 gfar_write(&regs->gaddr3, 0xffffffff);
3442 gfar_write(&regs->gaddr4, 0xffffffff);
3443 gfar_write(&regs->gaddr5, 0xffffffff);
3444 gfar_write(&regs->gaddr6, 0xffffffff);
3445 gfar_write(&regs->gaddr7, 0xffffffff);
3446 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003447 int em_num;
3448 int idx;
3449
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003451 gfar_write(&regs->igaddr0, 0x0);
3452 gfar_write(&regs->igaddr1, 0x0);
3453 gfar_write(&regs->igaddr2, 0x0);
3454 gfar_write(&regs->igaddr3, 0x0);
3455 gfar_write(&regs->igaddr4, 0x0);
3456 gfar_write(&regs->igaddr5, 0x0);
3457 gfar_write(&regs->igaddr6, 0x0);
3458 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 gfar_write(&regs->gaddr0, 0x0);
3460 gfar_write(&regs->gaddr1, 0x0);
3461 gfar_write(&regs->gaddr2, 0x0);
3462 gfar_write(&regs->gaddr3, 0x0);
3463 gfar_write(&regs->gaddr4, 0x0);
3464 gfar_write(&regs->gaddr5, 0x0);
3465 gfar_write(&regs->gaddr6, 0x0);
3466 gfar_write(&regs->gaddr7, 0x0);
3467
Andy Fleming7f7f5312005-11-11 12:38:59 -06003468 /* If we have extended hash tables, we need to
3469 * clear the exact match registers to prepare for
Jan Ceuleers0977f812012-06-05 03:42:12 +00003470 * setting them
3471 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003472 if (priv->extended_hash) {
3473 em_num = GFAR_EM_NUM + 1;
3474 gfar_clear_exact_match(dev);
3475 idx = 1;
3476 } else {
3477 idx = 0;
3478 em_num = 0;
3479 }
3480
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003481 if (netdev_mc_empty(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482 return;
3483
3484 /* Parse the list, and set the appropriate bits */
Jiri Pirko22bedad32010-04-01 21:22:57 +00003485 netdev_for_each_mc_addr(ha, dev) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06003486 if (idx < em_num) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00003487 gfar_set_mac_for_addr(dev, idx, ha->addr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003488 idx++;
3489 } else
Jiri Pirko22bedad32010-04-01 21:22:57 +00003490 gfar_set_hash_for_addr(dev, ha->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 }
3492 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493}
3494
Andy Fleming7f7f5312005-11-11 12:38:59 -06003495
3496/* Clears each of the exact match registers to zero, so they
Jan Ceuleers0977f812012-06-05 03:42:12 +00003497 * don't interfere with normal reception
3498 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06003499static void gfar_clear_exact_match(struct net_device *dev)
3500{
3501 int idx;
Joe Perches6a3c910c2011-11-16 09:38:02 +00003502 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
Andy Fleming7f7f5312005-11-11 12:38:59 -06003503
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003504 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
Joe Perchesb6bc7652010-12-21 02:16:08 -08003505 gfar_set_mac_for_addr(dev, idx, zero_arr);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003506}
3507
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508/* Set the appropriate hash bit for the given addr */
3509/* The algorithm works like so:
3510 * 1) Take the Destination Address (ie the multicast address), and
3511 * do a CRC on it (little endian), and reverse the bits of the
3512 * result.
3513 * 2) Use the 8 most significant bits as a hash into a 256-entry
3514 * table. The table is controlled through 8 32-bit registers:
3515 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3516 * gaddr7. This means that the 3 most significant bits in the
3517 * hash index which gaddr register to use, and the 5 other bits
3518 * indicate which bit (assuming an IBM numbering scheme, which
3519 * for PowerPC (tm) is usually the case) in the register holds
Jan Ceuleers0977f812012-06-05 03:42:12 +00003520 * the entry.
3521 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3523{
3524 u32 tempval;
3525 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c910c2011-11-16 09:38:02 +00003526 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003527 int width = priv->hash_width;
3528 u8 whichbit = (result >> (32 - width)) & 0x1f;
3529 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 u32 value = (1 << (31-whichbit));
3531
Kumar Gala0bbaf062005-06-20 10:54:21 -05003532 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003534 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535}
3536
Andy Fleming7f7f5312005-11-11 12:38:59 -06003537
3538/* There are multiple MAC Address register pairs on some controllers
3539 * This function sets the numth pair to a given address
3540 */
Joe Perchesb6bc7652010-12-21 02:16:08 -08003541static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3542 const u8 *addr)
Andy Fleming7f7f5312005-11-11 12:38:59 -06003543{
3544 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003545 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003546 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003547 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003548
3549 macptr += num*2;
3550
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003551 /* For a station address of 0x12345678ABCD in transmission
3552 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3553 * MACnADDR2 is set to 0x34120000.
Jan Ceuleers0977f812012-06-05 03:42:12 +00003554 */
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003555 tempval = (addr[5] << 24) | (addr[4] << 16) |
3556 (addr[3] << 8) | addr[2];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003557
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003558 gfar_write(macptr, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003559
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003560 tempval = (addr[1] << 24) | (addr[0] << 16);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003561
3562 gfar_write(macptr+1, tempval);
3563}
3564
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003566static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003568 struct gfar_priv_grp *gfargrp = grp_id;
3569 struct gfar __iomem *regs = gfargrp->regs;
3570 struct gfar_private *priv= gfargrp->priv;
3571 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572
3573 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003574 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575
3576 /* Clear IEVENT */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003577 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
Scott Woodd87eb122008-07-11 18:04:45 -05003578
3579 /* Magic Packet is not an error. */
Andy Flemingb31a1d82008-12-16 15:29:15 -08003580 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
Scott Woodd87eb122008-07-11 18:04:45 -05003581 (events & IEVENT_MAG))
3582 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583
3584 /* Hmm... */
Kumar Gala0bbaf062005-06-20 10:54:21 -05003585 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00003586 netdev_dbg(dev,
3587 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
Joe Perches59deab22011-06-14 08:57:47 +00003588 events, gfar_read(&regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589
3590 /* Update the error counters */
3591 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003592 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
3594 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003595 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003597 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 if (events & IEVENT_XFUN) {
Joe Perches59deab22011-06-14 08:57:47 +00003599 netif_dbg(priv, tx_err, dev,
3600 "TX FIFO underrun, packet dropped\n");
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003601 dev->stats.tx_dropped++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003602 atomic64_inc(&priv->extra_stats.tx_underrun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
Claudiu Manoilbc602282015-05-06 18:07:29 +03003604 schedule_work(&priv->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 }
Joe Perches59deab22011-06-14 08:57:47 +00003606 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 }
3608 if (events & IEVENT_BSY) {
Claudiu Manoil1de65a52015-10-23 11:42:00 +03003609 dev->stats.rx_over_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003610 atomic64_inc(&priv->extra_stats.rx_bsy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611
Joe Perches59deab22011-06-14 08:57:47 +00003612 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3613 gfar_read(&regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 }
3615 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07003616 dev->stats.rx_errors++;
Paul Gortmaker212079d2013-02-12 15:38:19 -05003617 atomic64_inc(&priv->extra_stats.rx_babr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618
Joe Perches59deab22011-06-14 08:57:47 +00003619 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 }
3621 if (events & IEVENT_EBERR) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05003622 atomic64_inc(&priv->extra_stats.eberr);
Joe Perches59deab22011-06-14 08:57:47 +00003623 netif_dbg(priv, rx_err, dev, "bus error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 }
Joe Perches59deab22011-06-14 08:57:47 +00003625 if (events & IEVENT_RXC)
3626 netif_dbg(priv, rx_status, dev, "control frame\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627
3628 if (events & IEVENT_BABT) {
Paul Gortmaker212079d2013-02-12 15:38:19 -05003629 atomic64_inc(&priv->extra_stats.tx_babt);
Joe Perches59deab22011-06-14 08:57:47 +00003630 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 }
3632 return IRQ_HANDLED;
3633}
3634
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003635static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3636{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003637 struct net_device *ndev = priv->ndev;
3638 struct phy_device *phydev = ndev->phydev;
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003639 u32 val = 0;
3640
3641 if (!phydev->duplex)
3642 return val;
3643
3644 if (!priv->pause_aneg_en) {
3645 if (priv->tx_pause_en)
3646 val |= MACCFG1_TX_FLOW;
3647 if (priv->rx_pause_en)
3648 val |= MACCFG1_RX_FLOW;
3649 } else {
3650 u16 lcl_adv, rmt_adv;
3651 u8 flowctrl;
3652 /* get link partner capabilities */
3653 rmt_adv = 0;
3654 if (phydev->pause)
3655 rmt_adv = LPA_PAUSE_CAP;
3656 if (phydev->asym_pause)
3657 rmt_adv |= LPA_PAUSE_ASYM;
3658
Pavaluca Matei-B4661043ef8d22014-10-27 10:42:43 +02003659 lcl_adv = 0;
3660 if (phydev->advertising & ADVERTISED_Pause)
3661 lcl_adv |= ADVERTISE_PAUSE_CAP;
3662 if (phydev->advertising & ADVERTISED_Asym_Pause)
3663 lcl_adv |= ADVERTISE_PAUSE_ASYM;
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003664
3665 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3666 if (flowctrl & FLOW_CTRL_TX)
3667 val |= MACCFG1_TX_FLOW;
3668 if (flowctrl & FLOW_CTRL_RX)
3669 val |= MACCFG1_RX_FLOW;
3670 }
3671
3672 return val;
3673}
3674
3675static noinline void gfar_update_link_state(struct gfar_private *priv)
3676{
3677 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Philippe Reynes4c4a6b02016-05-16 01:30:08 +02003678 struct net_device *ndev = priv->ndev;
3679 struct phy_device *phydev = ndev->phydev;
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003680 struct gfar_priv_rx_q *rx_queue = NULL;
3681 int i;
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003682
3683 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3684 return;
3685
3686 if (phydev->link) {
3687 u32 tempval1 = gfar_read(&regs->maccfg1);
3688 u32 tempval = gfar_read(&regs->maccfg2);
3689 u32 ecntrl = gfar_read(&regs->ecntrl);
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003690 u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003691
3692 if (phydev->duplex != priv->oldduplex) {
3693 if (!(phydev->duplex))
3694 tempval &= ~(MACCFG2_FULL_DUPLEX);
3695 else
3696 tempval |= MACCFG2_FULL_DUPLEX;
3697
3698 priv->oldduplex = phydev->duplex;
3699 }
3700
3701 if (phydev->speed != priv->oldspeed) {
3702 switch (phydev->speed) {
3703 case 1000:
3704 tempval =
3705 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3706
3707 ecntrl &= ~(ECNTRL_R100);
3708 break;
3709 case 100:
3710 case 10:
3711 tempval =
3712 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3713
3714 /* Reduced mode distinguishes
3715 * between 10 and 100
3716 */
3717 if (phydev->speed == SPEED_100)
3718 ecntrl |= ECNTRL_R100;
3719 else
3720 ecntrl &= ~(ECNTRL_R100);
3721 break;
3722 default:
3723 netif_warn(priv, link, priv->ndev,
3724 "Ack! Speed (%d) is not 10/100/1000!\n",
3725 phydev->speed);
3726 break;
3727 }
3728
3729 priv->oldspeed = phydev->speed;
3730 }
3731
3732 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3733 tempval1 |= gfar_get_flowctrl_cfg(priv);
3734
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003735 /* Turn last free buffer recording on */
3736 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3737 for (i = 0; i < priv->num_rx_queues; i++) {
Scott Woodb4b67f22015-07-29 16:13:06 +03003738 u32 bdp_dma;
3739
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003740 rx_queue = priv->rx_queue[i];
Scott Woodb4b67f22015-07-29 16:13:06 +03003741 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
3742 gfar_write(rx_queue->rfbptr, bdp_dma);
Matei Pavaluca45b679c92014-10-27 10:42:44 +02003743 }
3744
3745 priv->tx_actual_en = 1;
3746 }
3747
3748 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3749 priv->tx_actual_en = 0;
3750
Claudiu Manoil6ce29b02014-04-30 14:27:21 +03003751 gfar_write(&regs->maccfg1, tempval1);
3752 gfar_write(&regs->maccfg2, tempval);
3753 gfar_write(&regs->ecntrl, ecntrl);
3754
3755 if (!priv->oldlink)
3756 priv->oldlink = 1;
3757
3758 } else if (priv->oldlink) {
3759 priv->oldlink = 0;
3760 priv->oldspeed = 0;
3761 priv->oldduplex = -1;
3762 }
3763
3764 if (netif_msg_link(priv))
3765 phy_print_status(phydev);
3766}
3767
Fabian Frederick94e5a2a2015-03-17 19:37:34 +01003768static const struct of_device_id gfar_match[] =
Andy Flemingb31a1d82008-12-16 15:29:15 -08003769{
3770 {
3771 .type = "network",
3772 .compatible = "gianfar",
3773 },
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003774 {
3775 .compatible = "fsl,etsec2",
3776 },
Andy Flemingb31a1d82008-12-16 15:29:15 -08003777 {},
3778};
Anton Vorontsove72701a2009-10-14 14:54:52 -07003779MODULE_DEVICE_TABLE(of, gfar_match);
Andy Flemingb31a1d82008-12-16 15:29:15 -08003780
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781/* Structure for a device driver */
Grant Likely74888762011-02-22 21:05:51 -07003782static struct platform_driver gfar_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003783 .driver = {
3784 .name = "fsl-gianfar",
Grant Likely40182942010-04-13 16:13:02 -07003785 .pm = GFAR_PM_OPS,
3786 .of_match_table = gfar_match,
3787 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788 .probe = gfar_probe,
3789 .remove = gfar_remove,
3790};
3791
Axel Lindb62f682011-11-27 16:44:17 +00003792module_platform_driver(gfar_driver);