/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
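
/* For reference, a sketch of the descriptor ring described above
 * (field names as in struct txbd8/struct rxbd8 of gianfar.h, where
 * the exact sizes and flag layouts are defined):
 *
 *   TBASEn/RBASEn --> +----------------------------+
 *                     | BD 0:  status, bufPtr      |
 *                     +----------------------------+
 *                     | ...                        |
 *                     +----------------------------+
 *                     | BD ring_size-1:  WRAP set  | --> wraps to BD 0
 *                     +----------------------------+
 */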

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108#include "gianfar.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
110#define TX_TIMEOUT (1*HZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111
Andy Fleming7f7f5312005-11-11 12:38:59 -0600112const char gfar_driver_version[] = "1.3";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static struct sk_buff *gfar_new_skb(struct net_device *dev,
                                    dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                               int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

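/* Initialize a single Rx buffer descriptor: point it at @buf, mark it
 * empty and interrupt-generating, and set the wrap bit on the last
 * descriptor of the ring; gfar_wmb() orders the buffer pointer update
 * before the status handover to hardware.
 */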
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                            dma_addr_t buf)
{
        u32 lstatus;

        bdp->bufPtr = cpu_to_be32(buf);

        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
                lstatus |= BD_LFLAG(RXBD_WRAP);

        gfar_wmb();

        bdp->lstatus = cpu_to_be32(lstatus);
}

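/* Reset the Tx and Rx descriptor rings to their initial state: clear
 * every Tx descriptor and set the wrap bit on the last one, make sure
 * each Rx descriptor owns an skb and a mapped buffer, and record each
 * Rx queue's free buffer pointer register (rfbptr).
 */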
static int gfar_init_bds(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        u32 __iomem *rfbptr;
        int i, j;
        dma_addr_t bufaddr;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                /* Initialize some variables in our dev structure */
                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
                tx_queue->dirty_tx = tx_queue->tx_bd_base;
                tx_queue->cur_tx = tx_queue->tx_bd_base;
                tx_queue->skb_curtx = 0;
                tx_queue->skb_dirtytx = 0;

                /* Initialize Transmit Descriptor Ring */
                txbdp = tx_queue->tx_bd_base;
                for (j = 0; j < tx_queue->tx_ring_size; j++) {
                        txbdp->lstatus = 0;
                        txbdp->bufPtr = 0;
                        txbdp++;
                }

                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
                txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
                                            TXBD_WRAP);
        }

        rfbptr = &regs->rfbptr0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->cur_rx = rx_queue->rx_bd_base;
                rx_queue->skb_currx = 0;
                rxbdp = rx_queue->rx_bd_base;

                for (j = 0; j < rx_queue->rx_ring_size; j++) {
                        struct sk_buff *skb = rx_queue->rx_skbuff[j];

                        if (skb) {
                                bufaddr = be32_to_cpu(rxbdp->bufPtr);
                        } else {
                                skb = gfar_new_skb(ndev, &bufaddr);
                                if (!skb) {
                                        netdev_err(ndev, "Can't allocate RX buffers\n");
                                        return -ENOMEM;
                                }
                                rx_queue->rx_skbuff[j] = skb;
                        }

                        gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
                        rxbdp++;
                }

                rx_queue->rfbptr = rfbptr;
                rfbptr += 2;
        }

        return 0;
}

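/* Allocate one DMA-coherent block holding all Tx and Rx descriptor
 * rings back to back, carve it up among the queues, allocate the
 * per-ring skb pointer arrays, then populate the rings via
 * gfar_init_bds().
 */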
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
        void *vaddr;
        dma_addr_t addr;
        int i, j, k;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        priv->total_tx_ring_size = 0;
        for (i = 0; i < priv->num_tx_queues; i++)
                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

        priv->total_rx_ring_size = 0;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

        /* Allocate memory for the buffer descriptors */
        vaddr = dma_alloc_coherent(dev,
                                   (priv->total_tx_ring_size *
                                    sizeof(struct txbd8)) +
                                   (priv->total_rx_ring_size *
                                    sizeof(struct rxbd8)),
                                   &addr, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_bd_base = vaddr;
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
                addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }

        /* Start the rx descriptor ring where the tx ring leaves off */
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->dev = ndev;
                addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }

        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff =
                        kmalloc_array(tx_queue->tx_ring_size,
                                      sizeof(*tx_queue->tx_skbuff),
                                      GFP_KERNEL);
                if (!tx_queue->tx_skbuff)
                        goto cleanup;

                for (k = 0; k < tx_queue->tx_ring_size; k++)
                        tx_queue->tx_skbuff[k] = NULL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_skbuff =
                        kmalloc_array(rx_queue->rx_ring_size,
                                      sizeof(*rx_queue->rx_skbuff),
                                      GFP_KERNEL);
                if (!rx_queue->rx_skbuff)
                        goto cleanup;

                for (j = 0; j < rx_queue->rx_ring_size; j++)
                        rx_queue->rx_skbuff[j] = NULL;
        }

        if (gfar_init_bds(ndev))
                goto cleanup;

        return 0;

cleanup:
        free_skb_resources(priv);
        return -ENOMEM;
}

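/* Tell the controller where each descriptor ring lives: the TBASEn/
 * RBASEn registers are spaced 64 bits apart, hence the += 2 stride on
 * a u32 pointer.
 */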
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->tbase0;
        for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
                baddr += 2;
        }

        baddr = &regs->rbase0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
                baddr += 2;
        }
}

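/* Program the per-queue Rx parameter registers (RQPRMn) with the ring
 * size and the default lossless flow control threshold.
 */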
static void gfar_init_rqprm(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->rqprm0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
                           (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
                baddr++;
        }
}

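/* Derive the Rx buffer size from the MTU: add the Ethernet header and
 * FCS, the frame control block if any Rx offload (checksum, VLAN
 * extraction, timestamping) is on, and the alignment padding, then
 * round up to the next INCREMENTAL_BUFFER_SIZE boundary.
 */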
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
        int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;

        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;

        if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
                priv->uses_rxfcb = 1;

        if (priv->hwts_rx_en)
                priv->uses_rxfcb = 1;

        if (priv->uses_rxfcb)
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
                     INCREMENTAL_BUFFER_SIZE;

        priv->rx_buffer_size = frame_size;
}

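/* Assemble RCTRL from the current settings (filer, promiscuous mode,
 * Rx checksum offload, extended hash, padding, timestamping, VLAN
 * extraction, flow control) and program it into the controller.
 */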
static void gfar_mac_rx_config(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 rctrl = 0;

        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
                /* Program the RIR0 reg with the required distribution */
                if (priv->poll_mode == GFAR_SQ_POLLING)
                        gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
                else /* GFAR_MQ_POLLING */
                        gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
        }

        /* Restore PROMISC mode */
        if (priv->ndev->flags & IFF_PROMISC)
                rctrl |= RCTRL_PROM;

        if (priv->ndev->features & NETIF_F_RXCSUM)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash)
                rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Enable HW time stamping if requested from user space */
        if (priv->hwts_rx_en)
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

        /* Clear the LFC bit */
        gfar_write(&regs->rctrl, rctrl);
        /* Init flow control threshold values */
        gfar_init_rqprm(priv);
        gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
        rctrl |= RCTRL_LFC;

        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);
}

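/* Assemble TCTRL (Tx checksum offload, priority vs. weighted
 * round-robin scheduling, VLAN insertion) and program it.
 */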
static void gfar_mac_tx_config(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tctrl = 0;

        if (priv->ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        if (priv->prio_sched_en)
                tctrl |= TCTRL_TXSCHED_PRIO;
        else {
                tctrl |= TCTRL_TXSCHED_WRRS;
                gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
                gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
        }

        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
                tctrl |= TCTRL_VLINS;

        gfar_write(&regs->tctrl, tctrl);
}

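/* Program interrupt coalescing for the queues selected by @tx_mask
 * and @rx_mask; outside MQ_MG_MODE only the single global TXIC/RXIC
 * register pair exists.
 */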
static void gfar_configure_coalescing(struct gfar_private *priv,
                                      unsigned long tx_mask, unsigned long rx_mask)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;

        if (priv->mode == MQ_MG_MODE) {
                int i = 0;

                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                        gfar_write(baddr + i, 0);
                        if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
                }

                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                        gfar_write(baddr + i, 0);
                        if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
                }
        } else {
                /* Backward compatible case -- even if we enable
                 * multiple queues, there's only a single reg to program
                 */
                gfar_write(&regs->txic, 0);
                if (likely(priv->tx_queue[0]->txcoalescing))
                        gfar_write(&regs->txic, priv->tx_queue[0]->txic);

                gfar_write(&regs->rxic, 0);
                if (unlikely(priv->rx_queue[0]->rxcoalescing))
                        gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
        }
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
        gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

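/* Fold the per-queue packet, byte and drop counters into the shared
 * netdev stats.
 */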
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
        unsigned long tx_packets = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_packets += priv->rx_queue[i]->stats.rx_packets;
                rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
        }

        dev->stats.rx_packets = rx_packets;
        dev->stats.rx_bytes   = rx_bytes;
        dev->stats.rx_dropped = rx_dropped;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
                tx_packets += priv->tx_queue[i]->stats.tx_packets;
        }

        dev->stats.tx_bytes = tx_bytes;
        dev->stats.tx_packets = tx_packets;

        return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
        .ndo_stop = gfar_close,
        .ndo_change_mtu = gfar_change_mtu,
        .ndo_set_features = gfar_set_features,
        .ndo_set_rx_mode = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_get_stats = gfar_get_stats,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
        int i;
        for (i = 0; i < priv->num_grps; i++) {
                struct gfar __iomem *regs = priv->gfargrp[i].regs;
                /* Clear IEVENT */
                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

                /* Initialize IMASK */
                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
        }
}

static void gfar_ints_enable(struct gfar_private *priv)
{
        int i;
        for (i = 0; i < priv->num_grps; i++) {
                struct gfar __iomem *regs = priv->gfargrp[i].regs;
                /* Unmask the interrupts we look for */
                gfar_write(&regs->imask, IMASK_DEFAULT);
        }
}

static void lock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_lock(&priv->tx_queue[i]->txlock);
}

static void unlock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
                                            GFP_KERNEL);
                if (!priv->tx_queue[i])
                        return -ENOMEM;

                priv->tx_queue[i]->tx_skbuff = NULL;
                priv->tx_queue[i]->qindex = i;
                priv->tx_queue[i]->dev = priv->ndev;
                spin_lock_init(&(priv->tx_queue[i]->txlock));
        }
        return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
                                            GFP_KERNEL);
                if (!priv->rx_queue[i])
                        return -ENOMEM;

                priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->dev = priv->ndev;
        }
        return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < MAXGROUPS; i++)
                if (priv->gfargrp[i].regs)
                        iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
        int i, j;

        for (i = 0; i < priv->num_grps; i++)
                for (j = 0; j < GFAR_NUM_IRQS; j++) {
                        kfree(priv->gfargrp[i].irqinfo[j]);
                        priv->gfargrp[i].irqinfo[j] = NULL;
                }

        free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                napi_disable(&priv->gfargrp[i].napi_rx);
                napi_disable(&priv->gfargrp[i].napi_tx);
        }
}

static void enable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                napi_enable(&priv->gfargrp[i].napi_rx);
                napi_enable(&priv->gfargrp[i].napi_tx);
        }
}

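/* Set up one interrupt group from its device tree node: allocate irq
 * bookkeeping, map the register block, parse the interrupts and the
 * Rx/Tx queue bit maps, and bind the covered queues to the group.
 */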
static int gfar_parse_group(struct device_node *np,
                            struct gfar_private *priv, const char *model)
{
        struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
        int i;

        for (i = 0; i < GFAR_NUM_IRQS; i++) {
                grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
                                          GFP_KERNEL);
                if (!grp->irqinfo[i])
                        return -ENOMEM;
        }

        grp->regs = of_iomap(np, 0);
        if (!grp->regs)
                return -ENOMEM;

        gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

        /* If we aren't the FEC we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
                gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
                if (gfar_irq(grp, TX)->irq == NO_IRQ ||
                    gfar_irq(grp, RX)->irq == NO_IRQ ||
                    gfar_irq(grp, ER)->irq == NO_IRQ)
                        return -EINVAL;
        }

        grp->priv = priv;
        spin_lock_init(&grp->grplock);
        if (priv->mode == MQ_MG_MODE) {
                u32 rxq_mask, txq_mask;
                int ret;

                grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

                ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
                if (!ret) {
                        grp->rx_bit_map = rxq_mask ?
                                rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
                }

                ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
                if (!ret) {
                        grp->tx_bit_map = txq_mask ?
                                txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
                }

                if (priv->poll_mode == GFAR_SQ_POLLING) {
                        /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
                        grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                        grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                }
        } else {
                grp->rx_bit_map = 0xFF;
                grp->tx_bit_map = 0xFF;
        }

        /* bit_map's MSB is q0 (from q0 to q7) but for_each_set_bit parses
         * right to left, so we need to reverse the 8 bits to get the q index
         */
        grp->rx_bit_map = bitrev8(grp->rx_bit_map);
        grp->tx_bit_map = bitrev8(grp->tx_bit_map);

        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
         * also assign queues to groups
         */
        for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
                if (!grp->rx_queue)
                        grp->rx_queue = priv->rx_queue[i];
                grp->num_rx_queues++;
                grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
                priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                priv->rx_queue[i]->grp = grp;
        }

        for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
                if (!grp->tx_queue)
                        grp->tx_queue = priv->tx_queue[i];
                grp->num_tx_queues++;
                grp->tstat |= (TSTAT_CLEAR_THALT >> i);
                priv->tqueue |= (TQUEUE_EN0 >> i);
                priv->tx_queue[i]->grp = grp;
        }

        priv->num_grps++;

        return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
        struct device_node *child;
        int num = 0;

        for_each_available_child_of_node(np, child)
                if (!of_node_cmp(child->name, "queue-group"))
                        num++;

        return num;
}

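/* Parse the controller's device tree node: pick the operating and
 * polling modes and the queue counts, allocate the net_device and
 * queue structures, walk the queue-group children, and read the
 * stashing, MAC address, PHY and capability properties.
 */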
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
        const char *model;
        const char *ctype;
        const void *mac_addr;
        int err = 0, i;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->dev.of_node;
        struct device_node *child = NULL;
        struct property *stash;
        u32 stash_len = 0;
        u32 stash_idx = 0;
        unsigned int num_tx_qs, num_rx_qs;
        unsigned short mode, poll_mode;

        if (!np)
                return -ENODEV;

        if (of_device_is_compatible(np, "fsl,etsec2")) {
                mode = MQ_MG_MODE;
                poll_mode = GFAR_SQ_POLLING;
        } else {
                mode = SQ_SG_MODE;
                poll_mode = GFAR_SQ_POLLING;
        }

        if (mode == SQ_SG_MODE) {
                num_tx_qs = 1;
                num_rx_qs = 1;
        } else { /* MQ_MG_MODE */
                /* get the actual number of supported groups */
                unsigned int num_grps = gfar_of_group_count(np);

                if (num_grps == 0 || num_grps > MAXGROUPS) {
                        dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
                                num_grps);
                        pr_err("Cannot do alloc_etherdev, aborting\n");
                        return -EINVAL;
                }

                if (poll_mode == GFAR_SQ_POLLING) {
                        num_tx_qs = num_grps; /* one txq per int group */
                        num_rx_qs = num_grps; /* one rxq per int group */
                } else { /* GFAR_MQ_POLLING */
                        u32 tx_queues, rx_queues;
                        int ret;

                        /* parse the num of HW tx and rx queues */
                        ret = of_property_read_u32(np, "fsl,num_tx_queues",
                                                   &tx_queues);
                        num_tx_qs = ret ? 1 : tx_queues;

                        ret = of_property_read_u32(np, "fsl,num_rx_queues",
                                                   &rx_queues);
                        num_rx_qs = ret ? 1 : rx_queues;
                }
        }

        if (num_tx_qs > MAX_TX_QS) {
                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
                       num_tx_qs, MAX_TX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        if (num_rx_qs > MAX_RX_QS) {
                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                       num_rx_qs, MAX_RX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
        dev = *pdev;
        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->ndev = dev;

        priv->mode = mode;
        priv->poll_mode = poll_mode;

        priv->num_tx_queues = num_tx_qs;
        netif_set_real_num_rx_queues(dev, num_rx_qs);
        priv->num_rx_queues = num_rx_qs;

        err = gfar_alloc_tx_queues(priv);
        if (err)
                goto tx_alloc_failed;

        err = gfar_alloc_rx_queues(priv);
        if (err)
                goto rx_alloc_failed;

        err = of_property_read_string(np, "model", &model);
        if (err) {
                pr_err("Device model property missing, aborting\n");
                goto rx_alloc_failed;
        }

        /* Init Rx queue filer rule set linked list */
        INIT_LIST_HEAD(&priv->rx_list.list);
        priv->rx_list.count = 0;
        mutex_init(&priv->rx_queue_access);

        for (i = 0; i < MAXGROUPS; i++)
                priv->gfargrp[i].regs = NULL;

        /* Parse and initialize group specific information */
        if (priv->mode == MQ_MG_MODE) {
                for_each_available_child_of_node(np, child) {
                        if (of_node_cmp(child->name, "queue-group"))
                                continue;

                        err = gfar_parse_group(child, priv, model);
                        if (err)
                                goto err_grp_init;
                }
        } else { /* SQ_SG_MODE */
                err = gfar_parse_group(np, priv, model);
                if (err)
                        goto err_grp_init;
        }

        stash = of_find_property(np, "bd-stash", NULL);

        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        err = of_property_read_u32(np, "rx-stash-len", &stash_len);

        if (err == 0)
                priv->rx_stash_size = stash_len;

        err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

        if (err == 0)
                priv->rx_stash_index = stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        mac_addr = of_get_mac_address(np);

        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                      FSL_GIANFAR_DEV_HAS_COALESCE |
                                      FSL_GIANFAR_DEV_HAS_RMON |
                                      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                      FSL_GIANFAR_DEV_HAS_COALESCE |
                                      FSL_GIANFAR_DEV_HAS_RMON |
                                      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                                      FSL_GIANFAR_DEV_HAS_CSUM |
                                      FSL_GIANFAR_DEV_HAS_VLAN |
                                      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                                      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
                                      FSL_GIANFAR_DEV_HAS_TIMER;

        err = of_property_read_string(np, "phy-connection-type", &ctype);

        /* We only care about rgmii-id.  The rest are autodetected */
        if (err == 0 && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;

        if (of_find_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* In the case of a fixed PHY, the DT node associated
         * to the PHY is the Ethernet MAC DT node.
         */
        if (!priv->phy_node && of_phy_is_fixed_link(np)) {
                err = of_phy_register_fixed_link(np);
                if (err)
                        goto err_grp_init;

                priv->phy_node = of_node_get(np);
        }

        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

err_grp_init:
        unmap_group_regs(priv);
rx_alloc_failed:
        gfar_free_rx_queues(priv);
tx_alloc_failed:
        gfar_free_tx_queues(priv);
        free_gfar_dev(priv);
        return err;
}

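/* SIOCSHWTSTAMP handler: switch hardware Tx/Rx time stamping on or
 * off. Changing the Rx setting changes the Rx frame layout, so the
 * device is reset for it to take effect.
 */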
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                priv->hwts_tx_en = 0;
                break;
        case HWTSTAMP_TX_ON:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                priv->hwts_tx_en = 1;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                if (priv->hwts_rx_en) {
                        priv->hwts_rx_en = 0;
                        reset_gfar(netdev);
                }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                if (!priv->hwts_rx_en) {
                        priv->hwts_rx_en = 1;
                        reset_gfar(netdev);
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        }

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);

        config.flags = 0;
        config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
        config.rx_filter = (priv->hwts_rx_en ?
                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd == SIOCSHWTSTAMP)
                return gfar_hwtstamp_set(dev, rq);
        if (cmd == SIOCGHWTSTAMP)
                return gfar_hwtstamp_get(dev, rq);

        if (!priv->phydev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phydev, rq, cmd);
}

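/* Add the filer entries that cluster frames of one parsed protocol
 * class (@class), filling the table downward from @rqfar; returns the
 * updated (next free) filer index.
 */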
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
                                   u32 class)
{
        u32 rqfpr = FPR_FILER_MASK;
        u32 rqfcr = 0x0;

        rqfar--;
        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_NOMATCH;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
        int i = 0x0;
        u32 rqfar = MAX_FILER_IDX;
        u32 rqfcr = 0x0;
        u32 rqfpr = FPR_FILER_MASK;

        /* Default rule */
        rqfcr = RQFCR_CMP_MATCH;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

        /* cur_filer_idx indicates the first non-masked rule */
        priv->cur_filer_idx = rqfar;

        /* Rest are masked rules */
        rqfcr = RQFCR_CMP_NOMATCH;
        for (i = 0; i < rqfar; i++) {
                priv->ftp_rqfcr[i] = rqfcr;
                priv->ftp_rqfpr[i] = rqfpr;
                gfar_write_filer(priv, i, rqfcr, rqfpr);
        }
}

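/* Errata detection: identify affected MPC83xx/MPC85xx parts from the
 * PVR/SVR values and record the applicable workaround flags.
 */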
#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
        unsigned int rev = svr & 0xffff;

        /* MPC8313 Rev 2.0 and higher; All MPC837x */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_74;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;

        /* MPC8313 Rev < 2.0 */
        if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
                priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
        unsigned int svr = mfspr(SPRN_SVR);

        if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
        if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
            ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
                priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
        struct device *dev = &priv->ofdev->dev;

        /* no plans to fix */
        priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
        if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
                __gfar_detect_errata_85xx(priv);
        else /* non-mpc85xx parts, i.e. e300 core based */
                __gfar_detect_errata_83xx(priv);
#endif

        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
                         priv->errata);
}

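/* Soft-reset the MAC and reprogram it from the current configuration:
 * frame length limits, MACCFG2, address hash and exact-match filters,
 * Rx/Tx control, station address and interrupt coalescing.
 */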
Claudiu Manoil08511332014-02-24 12:13:45 +02001179void gfar_mac_reset(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180{
Claudiu Manoil20862782014-02-17 12:53:14 +02001181 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Claudiu Manoila328ac92014-02-24 12:13:42 +02001182 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
1184 /* Reset MAC layer */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001185 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186
Andy Flemingb98ac702009-02-04 16:38:05 -08001187 /* We need to delay at least 3 TX clocks */
Claudiu Manoila328ac92014-02-24 12:13:42 +02001188 udelay(3);
Andy Flemingb98ac702009-02-04 16:38:05 -08001189
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001190 /* the soft reset bit is not self-resetting, so we need to
1191 * clear it before resuming normal operation
1192 */
Claudiu Manoil20862782014-02-17 12:53:14 +02001193 gfar_write(&regs->maccfg1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
Claudiu Manoila328ac92014-02-24 12:13:42 +02001195 udelay(3);
1196
Claudiu Manoil88302642014-02-24 12:13:43 +02001197 /* Compute rx_buff_size based on config flags */
1198 gfar_rx_buff_size_config(priv);
1199
1200 /* Initialize the max receive frame/buffer lengths */
1201 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Claudiu Manoila328ac92014-02-24 12:13:42 +02001202 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1203
1204 /* Initialize the Minimum Frame Length Register */
1205 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1206
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 /* Initialize MACCFG2. */
Anton Vorontsov7d350972010-06-30 06:39:12 +00001208 tempval = MACCFG2_INIT_SETTINGS;
Claudiu Manoil88302642014-02-24 12:13:43 +02001209
1210 /* If the mtu is larger than the max size for standard
1211	 * ethernet frames (i.e., a jumbo frame), then set maccfg2
1212 * to allow huge frames, and to check the length
1213 */
1214 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1215 gfar_has_errata(priv, GFAR_ERRATA_74))
Anton Vorontsov7d350972010-06-30 06:39:12 +00001216 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
Claudiu Manoil88302642014-02-24 12:13:43 +02001217
Anton Vorontsov7d350972010-06-30 06:39:12 +00001218 gfar_write(&regs->maccfg2, tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
Claudiu Manoila328ac92014-02-24 12:13:42 +02001220 /* Clear mac addr hash registers */
1221 gfar_write(&regs->igaddr0, 0);
1222 gfar_write(&regs->igaddr1, 0);
1223 gfar_write(&regs->igaddr2, 0);
1224 gfar_write(&regs->igaddr3, 0);
1225 gfar_write(&regs->igaddr4, 0);
1226 gfar_write(&regs->igaddr5, 0);
1227 gfar_write(&regs->igaddr6, 0);
1228 gfar_write(&regs->igaddr7, 0);
1229
1230 gfar_write(&regs->gaddr0, 0);
1231 gfar_write(&regs->gaddr1, 0);
1232 gfar_write(&regs->gaddr2, 0);
1233 gfar_write(&regs->gaddr3, 0);
1234 gfar_write(&regs->gaddr4, 0);
1235 gfar_write(&regs->gaddr5, 0);
1236 gfar_write(&regs->gaddr6, 0);
1237 gfar_write(&regs->gaddr7, 0);
1238
1239 if (priv->extended_hash)
1240 gfar_clear_exact_match(priv->ndev);
1241
1242 gfar_mac_rx_config(priv);
1243
1244 gfar_mac_tx_config(priv);
1245
1246 gfar_set_mac_address(priv->ndev);
1247
1248 gfar_set_multi(priv->ndev);
1249
1250 /* clear ievent and imask before configuring coalescing */
1251 gfar_ints_disable(priv);
1252
1253 /* Configure the coalescing support */
1254 gfar_configure_coalescing_all(priv);
1255}
1256
1257static void gfar_hw_init(struct gfar_private *priv)
1258{
1259 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1260 u32 attrs;
1261
1262 /* Stop the DMA engine now, in case it was running before
1263 * (The firmware could have used it, and left it running).
1264 */
1265 gfar_halt(priv);
1266
1267 gfar_mac_reset(priv);
1268
1269	/* Zero out the RMON MIB registers if the device has them */
1270 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1271 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1272
1273 /* Mask off the CAM interrupts */
1274 gfar_write(&regs->rmon.cam1, 0xffffffff);
1275 gfar_write(&regs->rmon.cam2, 0xffffffff);
1276 }
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 /* Initialize ECNTRL */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001279 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280
Claudiu Manoil34018fd2014-02-17 12:53:15 +02001281 /* Set the extraction length and index */
1282 attrs = ATTRELI_EL(priv->rx_stash_size) |
1283 ATTRELI_EI(priv->rx_stash_index);
1284
1285 gfar_write(&regs->attreli, attrs);
1286
1287 /* Start with defaults, and add stashing
1288 * depending on driver parameters
1289 */
1290 attrs = ATTR_INIT_SETTINGS;
1291
1292 if (priv->bd_stash_en)
1293 attrs |= ATTR_BDSTASH;
1294
1295 if (priv->rx_stash_size != 0)
1296 attrs |= ATTR_BUFSTASH;
1297
1298 gfar_write(&regs->attr, attrs);
1299
1300 /* FIFO configs */
1301 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1302 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1303 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1304
Claudiu Manoil20862782014-02-17 12:53:14 +02001305 /* Program the interrupt steering regs, only for MG devices */
1306 if (priv->num_grps > 1)
1307 gfar_write_isrg(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001308}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
Xiubo Li898157e2014-06-04 16:49:16 +08001310static void gfar_init_addr_hash_table(struct gfar_private *priv)
Claudiu Manoil20862782014-02-17 12:53:14 +02001311{
1312 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001313
Andy Flemingb31a1d82008-12-16 15:29:15 -08001314 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001315 priv->extended_hash = 1;
1316 priv->hash_width = 9;
1317
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001318 priv->hash_regs[0] = &regs->igaddr0;
1319 priv->hash_regs[1] = &regs->igaddr1;
1320 priv->hash_regs[2] = &regs->igaddr2;
1321 priv->hash_regs[3] = &regs->igaddr3;
1322 priv->hash_regs[4] = &regs->igaddr4;
1323 priv->hash_regs[5] = &regs->igaddr5;
1324 priv->hash_regs[6] = &regs->igaddr6;
1325 priv->hash_regs[7] = &regs->igaddr7;
1326 priv->hash_regs[8] = &regs->gaddr0;
1327 priv->hash_regs[9] = &regs->gaddr1;
1328 priv->hash_regs[10] = &regs->gaddr2;
1329 priv->hash_regs[11] = &regs->gaddr3;
1330 priv->hash_regs[12] = &regs->gaddr4;
1331 priv->hash_regs[13] = &regs->gaddr5;
1332 priv->hash_regs[14] = &regs->gaddr6;
1333 priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001334
1335 } else {
1336 priv->extended_hash = 0;
1337 priv->hash_width = 8;
1338
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001339 priv->hash_regs[0] = &regs->gaddr0;
1340 priv->hash_regs[1] = &regs->gaddr1;
1341 priv->hash_regs[2] = &regs->gaddr2;
1342 priv->hash_regs[3] = &regs->gaddr3;
1343 priv->hash_regs[4] = &regs->gaddr4;
1344 priv->hash_regs[5] = &regs->gaddr5;
1345 priv->hash_regs[6] = &regs->gaddr6;
1346 priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001347 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001348}
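/* The hash_regs[]/hash_width values set here feed the multicast
 * filtering path (gfar_set_hash_for_addr()); a sketch of how a
 * destination address is mapped to one bit of one hash register,
 * assuming the usual CRC-based scheme this driver uses:
 *
 *	u32 result = ether_crc(ETH_ALEN, addr);
 *	int width = priv->hash_width;
 *	u8 whichbit = (result >> (32 - width)) & 0x1f;
 *	u8 whichreg = result >> (32 - width + 5);
 *	u32 value = 1 << (31 - whichbit);
 *
 * The value bit is then OR-ed into priv->hash_regs[whichreg].
 */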
1349
1350/* Set up the ethernet device structure, private data,
1351 * and anything else we need before we start
1352 */
1353static int gfar_probe(struct platform_device *ofdev)
1354{
1355 struct net_device *dev = NULL;
1356 struct gfar_private *priv = NULL;
1357 int err = 0, i;
1358
1359 err = gfar_of_init(ofdev, &dev);
1360
1361 if (err)
1362 return err;
1363
1364 priv = netdev_priv(dev);
1365 priv->ndev = dev;
1366 priv->ofdev = ofdev;
1367 priv->dev = &ofdev->dev;
1368 SET_NETDEV_DEV(dev, &ofdev->dev);
1369
1370 spin_lock_init(&priv->bflock);
1371 INIT_WORK(&priv->reset_task, gfar_reset_task);
1372
1373 platform_set_drvdata(ofdev, priv);
1374
1375 gfar_detect_errata(priv);
1376
Claudiu Manoil20862782014-02-17 12:53:14 +02001377 /* Set the dev->base_addr to the gfar reg region */
1378 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1379
1380 /* Fill in the dev structure */
1381 dev->watchdog_timeo = TX_TIMEOUT;
1382 dev->mtu = 1500;
1383 dev->netdev_ops = &gfar_netdev_ops;
1384 dev->ethtool_ops = &gfar_ethtool_ops;
1385
1386	/* Register NAPI; we register one Rx and one Tx NAPI context per group */
Claudiu Manoil71ff9e32014-03-07 14:42:46 +02001387 for (i = 0; i < priv->num_grps; i++) {
1388 if (priv->poll_mode == GFAR_SQ_POLLING) {
1389 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1390 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1391 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1392 gfar_poll_tx_sq, 2);
1393 } else {
Claudiu Manoilaeb12c52014-03-07 14:42:45 +02001394 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1395 gfar_poll_rx, GFAR_DEV_WEIGHT);
1396 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1397 gfar_poll_tx, 2);
1398 }
1399 }
Claudiu Manoil20862782014-02-17 12:53:14 +02001400
1401 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1402 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1403 NETIF_F_RXCSUM;
1404 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1405 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1406 }
1407
1408 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1409 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1410 NETIF_F_HW_VLAN_CTAG_RX;
1411 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1412 }
1413
1414 gfar_init_addr_hash_table(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001415
Claudiu Manoil532c37b2014-02-17 12:53:16 +02001416 /* Insert receive time stamps into padding alignment bytes */
1417 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1418 priv->padding = 8;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001419
Manfred Rudigiercc772ab2010-04-08 23:10:03 +00001420 if (dev->features & NETIF_F_IP_CSUM ||
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001421 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
Wu Jiajun-B06378bee9e582012-05-21 23:00:48 +00001422 dev->needed_headroom = GMAC_FCB_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
1424 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001426 /* Initializing some of the rx/tx queue level parameters */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001427 for (i = 0; i < priv->num_tx_queues; i++) {
1428 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1429 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1430 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1431 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1432 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001433
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001434 for (i = 0; i < priv->num_rx_queues; i++) {
1435 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1436 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1437 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
Jan Ceuleers0977f812012-06-05 03:42:12 +00001440 /* always enable rx filer */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001441 priv->rx_filer_enable = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001442 /* Enable most messages by default */
1443	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoilb98b8ba2012-09-23 22:39:08 +00001444	/* use priority h/w tx queue scheduling for single queue devices */
1445 if (priv->num_tx_queues == 1)
1446 priv->prio_sched_en = 1;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001447
Claudiu Manoil08511332014-02-24 12:13:45 +02001448 set_bit(GFAR_DOWN, &priv->state);
1449
Claudiu Manoila328ac92014-02-24 12:13:42 +02001450 gfar_hw_init(priv);
Trent Piephod3eab822008-10-02 11:12:24 +00001451
Fabio Estevamd4c642e2014-06-03 19:55:38 -03001452 /* Carrier starts down, phylib will bring it up */
1453 netif_carrier_off(dev);
1454
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 err = register_netdev(dev);
1456
1457 if (err) {
Joe Perches59deab22011-06-14 08:57:47 +00001458 pr_err("%s: Cannot register net device, aborting\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 goto register_fail;
1460 }
1461
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001462 device_init_wakeup(&dev->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001463 priv->device_flags &
1464 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08001465
Dai Harukic50a5d92008-12-17 16:51:32 -08001466 /* fill out IRQ number and name fields */
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001467 for (i = 0; i < priv->num_grps; i++) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001468 struct gfar_priv_grp *grp = &priv->gfargrp[i];
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001469 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001470 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001471 dev->name, "_g", '0' + i, "_tx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001472 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001473 dev->name, "_g", '0' + i, "_rx");
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001474 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
Joe Perches0015e552012-03-25 07:10:07 +00001475 dev->name, "_g", '0' + i, "_er");
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001476 } else
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001477 strcpy(gfar_irq(grp, TX)->name, dev->name);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001478 }
Dai Harukic50a5d92008-12-17 16:51:32 -08001479
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001480 /* Initialize the filer table */
1481 gfar_init_filer_table(priv);
1482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 /* Print out the device info */
Joe Perches59deab22011-06-14 08:57:47 +00001484 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
Jan Ceuleers0977f812012-06-05 03:42:12 +00001486 /* Even more device info helps when determining which kernel
1487 * provided which set of benchmarks.
1488 */
Joe Perches59deab22011-06-14 08:57:47 +00001489 netdev_info(dev, "Running with NAPI enabled\n");
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001490 for (i = 0; i < priv->num_rx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001491 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1492 i, priv->rx_queue[i]->rx_ring_size);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001493 for (i = 0; i < priv->num_tx_queues; i++)
Joe Perches59deab22011-06-14 08:57:47 +00001494 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1495 i, priv->tx_queue[i]->tx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496
1497 return 0;
1498
1499register_fail:
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001500 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001501 gfar_free_rx_queues(priv);
1502 gfar_free_tx_queues(priv);
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001503 of_node_put(priv->phy_node);
1504 of_node_put(priv->tbi_node);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001505 free_gfar_dev(priv);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001506 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507}
1508
Grant Likely2dc11582010-08-06 09:25:50 -06001509static int gfar_remove(struct platform_device *ofdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510{
Jingoo Han8513fbd2013-05-23 00:52:31 +00001511 struct gfar_private *priv = platform_get_drvdata(ofdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
Uwe Kleine-König888c88b2014-08-07 21:20:12 +02001513 of_node_put(priv->phy_node);
1514 of_node_put(priv->tbi_node);
Grant Likelyfe192a42009-04-25 12:53:12 +00001515
David S. Millerd9d8e042009-09-06 01:41:02 -07001516 unregister_netdev(priv->ndev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001517 unmap_group_regs(priv);
Claudiu Manoil20862782014-02-17 12:53:14 +02001518 gfar_free_rx_queues(priv);
1519 gfar_free_tx_queues(priv);
Claudiu Manoilee873fd2013-01-29 03:55:12 +00001520 free_gfar_dev(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
1522 return 0;
1523}
1524
Scott Woodd87eb122008-07-11 18:04:45 -05001525#ifdef CONFIG_PM
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001526
1527static int gfar_suspend(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001528{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001529 struct gfar_private *priv = dev_get_drvdata(dev);
1530 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001531 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001532 unsigned long flags;
1533 u32 tempval;
1534
1535 int magic_packet = priv->wol_en &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001536 (priv->device_flags &
1537 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001538
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001539 netif_device_detach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001540
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001541 if (netif_running(ndev)) {
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001542
1543 local_irq_save(flags);
1544 lock_tx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001545
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001546 gfar_halt_nodisable(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001547
1548 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001549 tempval = gfar_read(&regs->maccfg1);
Scott Woodd87eb122008-07-11 18:04:45 -05001550
1551 tempval &= ~MACCFG1_TX_EN;
1552
1553 if (!magic_packet)
1554 tempval &= ~MACCFG1_RX_EN;
1555
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001556 gfar_write(&regs->maccfg1, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001557
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001558 unlock_tx_qs(priv);
1559 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001560
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001561 disable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001562
1563 if (magic_packet) {
1564 /* Enable interrupt on Magic Packet */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001565 gfar_write(&regs->imask, IMASK_MAG);
Scott Woodd87eb122008-07-11 18:04:45 -05001566
1567 /* Enable Magic Packet mode */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001568 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001569 tempval |= MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001570 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001571 } else {
1572 phy_stop(priv->phydev);
1573 }
1574 }
1575
1576 return 0;
1577}
1578
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001579static int gfar_resume(struct device *dev)
Scott Woodd87eb122008-07-11 18:04:45 -05001580{
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001581 struct gfar_private *priv = dev_get_drvdata(dev);
1582 struct net_device *ndev = priv->ndev;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001583 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001584 unsigned long flags;
1585 u32 tempval;
1586 int magic_packet = priv->wol_en &&
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001587 (priv->device_flags &
1588 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
Scott Woodd87eb122008-07-11 18:04:45 -05001589
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001590 if (!netif_running(ndev)) {
1591 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001592 return 0;
1593 }
1594
1595 if (!magic_packet && priv->phydev)
1596 phy_start(priv->phydev);
1597
1598 /* Disable Magic Packet mode, in case something
1599 * else woke us up.
1600 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001601 local_irq_save(flags);
1602 lock_tx_qs(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001603
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001604 tempval = gfar_read(&regs->maccfg2);
Scott Woodd87eb122008-07-11 18:04:45 -05001605 tempval &= ~MACCFG2_MPEN;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001606 gfar_write(&regs->maccfg2, tempval);
Scott Woodd87eb122008-07-11 18:04:45 -05001607
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001608 gfar_start(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001609
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001610 unlock_tx_qs(priv);
1611 local_irq_restore(flags);
Scott Woodd87eb122008-07-11 18:04:45 -05001612
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001613 netif_device_attach(ndev);
Scott Woodd87eb122008-07-11 18:04:45 -05001614
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001615 enable_napi(priv);
Scott Woodd87eb122008-07-11 18:04:45 -05001616
1617 return 0;
1618}
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001619
1620static int gfar_restore(struct device *dev)
1621{
1622 struct gfar_private *priv = dev_get_drvdata(dev);
1623 struct net_device *ndev = priv->ndev;
1624
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001625 if (!netif_running(ndev)) {
1626 netif_device_attach(ndev);
1627
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001628 return 0;
Wang Dongsheng103cdd12012-11-09 04:43:51 +00001629 }
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001630
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001631 if (gfar_init_bds(ndev)) {
1632 free_skb_resources(priv);
1633 return -ENOMEM;
1634 }
1635
Claudiu Manoila328ac92014-02-24 12:13:42 +02001636 gfar_mac_reset(priv);
1637
1638 gfar_init_tx_rx_base(priv);
1639
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001640 gfar_start(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001641
1642 priv->oldlink = 0;
1643 priv->oldspeed = 0;
1644 priv->oldduplex = -1;
1645
1646 if (priv->phydev)
1647 phy_start(priv->phydev);
1648
1649 netif_device_attach(ndev);
Anton Vorontsov5ea681d2009-11-10 14:11:05 +00001650 enable_napi(priv);
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001651
1652 return 0;
1653}
1654
1655static struct dev_pm_ops gfar_pm_ops = {
1656 .suspend = gfar_suspend,
1657 .resume = gfar_resume,
1658 .freeze = gfar_suspend,
1659 .thaw = gfar_resume,
1660 .restore = gfar_restore,
1661};
1662
1663#define GFAR_PM_OPS (&gfar_pm_ops)
1664
Scott Woodd87eb122008-07-11 18:04:45 -05001665#else
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001666
1667#define GFAR_PM_OPS NULL
Anton Vorontsovbe926fc2009-10-12 06:00:42 +00001668
Scott Woodd87eb122008-07-11 18:04:45 -05001669#endif
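/* GFAR_PM_OPS is plugged into the platform driver definition later in
 * this file; a sketch of the relevant wiring, assuming the
 * conventional platform_driver layout:
 *
 *	static struct platform_driver gfar_driver = {
 *		.driver = {
 *			.name = "fsl-gianfar",
 *			.pm = GFAR_PM_OPS,
 *		},
 *		.probe = gfar_probe,
 *		.remove = gfar_remove,
 *	};
 */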
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001671/* Reads the controller's registers to determine what interface
1672 * connects it to the PHY.
1673 */
1674static phy_interface_t gfar_get_interface(struct net_device *dev)
1675{
1676 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001677 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001678 u32 ecntrl;
1679
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001680 ecntrl = gfar_read(&regs->ecntrl);
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001681
1682 if (ecntrl & ECNTRL_SGMII_MODE)
1683 return PHY_INTERFACE_MODE_SGMII;
1684
1685 if (ecntrl & ECNTRL_TBI_MODE) {
1686 if (ecntrl & ECNTRL_REDUCED_MODE)
1687 return PHY_INTERFACE_MODE_RTBI;
1688 else
1689 return PHY_INTERFACE_MODE_TBI;
1690 }
1691
1692 if (ecntrl & ECNTRL_REDUCED_MODE) {
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001693 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001694 return PHY_INTERFACE_MODE_RMII;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001695 }
Andy Fleming7132ab72007-07-11 11:43:07 -05001696 else {
Andy Flemingb31a1d82008-12-16 15:29:15 -08001697 phy_interface_t interface = priv->interface;
Andy Fleming7132ab72007-07-11 11:43:07 -05001698
Jan Ceuleers0977f812012-06-05 03:42:12 +00001699 /* This isn't autodetected right now, so it must
Andy Fleming7132ab72007-07-11 11:43:07 -05001700 * be set by the device tree or platform code.
1701 */
1702 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1703 return PHY_INTERFACE_MODE_RGMII_ID;
1704
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001705 return PHY_INTERFACE_MODE_RGMII;
Andy Fleming7132ab72007-07-11 11:43:07 -05001706 }
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001707 }
1708
Andy Flemingb31a1d82008-12-16 15:29:15 -08001709 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001710 return PHY_INTERFACE_MODE_GMII;
1711
1712 return PHY_INTERFACE_MODE_MII;
1713}
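/* Note the priority order of the checks above: SGMII first, then
 * TBI/RTBI, then the reduced modes (RMII/RGMII), then GMII when the
 * device advertises gigabit support, with MII as the final fallback.
 */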
1714
1715
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001716/* Initializes driver's PHY state, and attaches to the PHY.
1717 * Returns 0 on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 */
1719static int init_phy(struct net_device *dev)
1720{
1721 struct gfar_private *priv = netdev_priv(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001722 uint gigabit_support =
Andy Flemingb31a1d82008-12-16 15:29:15 -08001723 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001724 GFAR_SUPPORTED_GBIT : 0;
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001725 phy_interface_t interface;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
1727 priv->oldlink = 0;
1728 priv->oldspeed = 0;
1729 priv->oldduplex = -1;
1730
Andy Fleminge8a2b6a2006-12-01 12:01:06 -06001731 interface = gfar_get_interface(dev);
1732
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001733 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1734 interface);
Anton Vorontsov1db780f2009-07-16 21:31:42 +00001735 if (!priv->phydev) {
1736 dev_err(&dev->dev, "could not attach to PHY\n");
1737 return -ENODEV;
Grant Likelyfe192a42009-04-25 12:53:12 +00001738 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
Kapil Junejad3c12872007-05-11 18:25:11 -05001740 if (interface == PHY_INTERFACE_MODE_SGMII)
1741 gfar_configure_serdes(dev);
1742
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001743 /* Remove any features not supported by the controller */
Grant Likelyfe192a42009-04-25 12:53:12 +00001744 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1745 priv->phydev->advertising = priv->phydev->supported;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
Pavaluca Matei-B46610cf987af2014-10-27 10:42:42 +02001747 /* Add support for flow control, but don't advertise it by default */
1748 priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1749
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751}
1752
Jan Ceuleers0977f812012-06-05 03:42:12 +00001753/* Initialize TBI PHY interface for communicating with the
Paul Gortmakerd0313582008-04-17 00:08:10 -04001754 * SERDES lynx PHY on the chip. We communicate with this PHY
1755 * through the MDIO bus on each controller, treating it as a
1756 * "normal" PHY at the address found in the TBIPA register. We assume
1757 * that the TBIPA register is valid. Either the MDIO bus code will set
1758 * it to a value that doesn't conflict with other PHYs on the bus, or the
1759 * value doesn't matter, as there are no other PHYs on the bus.
1760 */
Kapil Junejad3c12872007-05-11 18:25:11 -05001761static void gfar_configure_serdes(struct net_device *dev)
1762{
1763 struct gfar_private *priv = netdev_priv(dev);
Grant Likelyfe192a42009-04-25 12:53:12 +00001764 struct phy_device *tbiphy;
Trent Piephoc1324192008-10-30 18:17:06 -07001765
Grant Likelyfe192a42009-04-25 12:53:12 +00001766 if (!priv->tbi_node) {
1767 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1768 "device tree specify a tbi-handle\n");
1769 return;
1770 }
1771
1772 tbiphy = of_phy_find_device(priv->tbi_node);
1773 if (!tbiphy) {
1774 dev_err(&dev->dev, "error: Could not get TBI device\n");
Andy Flemingb31a1d82008-12-16 15:29:15 -08001775 return;
1776 }
Kapil Junejad3c12872007-05-11 18:25:11 -05001777
Jan Ceuleers0977f812012-06-05 03:42:12 +00001778 /* If the link is already up, we must already be ok, and don't need to
Trent Piephobdb59f92008-10-30 18:17:07 -07001779 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1780 * everything for us? Resetting it takes the link down and requires
1781 * several seconds for it to come back.
1782 */
Grant Likelyfe192a42009-04-25 12:53:12 +00001783 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
Andy Flemingb31a1d82008-12-16 15:29:15 -08001784 return;
Kapil Junejad3c12872007-05-11 18:25:11 -05001785
Paul Gortmakerd0313582008-04-17 00:08:10 -04001786	/* Single clk mode, mii mode off (for serdes communication) */
Grant Likelyfe192a42009-04-25 12:53:12 +00001787 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
Kapil Junejad3c12872007-05-11 18:25:11 -05001788
Grant Likelyfe192a42009-04-25 12:53:12 +00001789 phy_write(tbiphy, MII_ADVERTISE,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001790 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1791 ADVERTISE_1000XPSE_ASYM);
Kapil Junejad3c12872007-05-11 18:25:11 -05001792
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001793 phy_write(tbiphy, MII_BMCR,
1794 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1795 BMCR_SPEED1000);
Kapil Junejad3c12872007-05-11 18:25:11 -05001796}
1797
Anton Vorontsov511d9342010-06-30 06:39:15 +00001798static int __gfar_is_rx_idle(struct gfar_private *priv)
1799{
1800 u32 res;
1801
Jan Ceuleers0977f812012-06-05 03:42:12 +00001802	/* Normally the TSEC should not hang on GRS commands, so we should
Anton Vorontsov511d9342010-06-30 06:39:15 +00001803	 * actually wait for the IEVENT_GRSC flag.
1804 */
Claudiu Manoilad3660c2013-10-09 20:20:40 +03001805 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
Anton Vorontsov511d9342010-06-30 06:39:15 +00001806 return 0;
1807
Jan Ceuleers0977f812012-06-05 03:42:12 +00001808 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
Anton Vorontsov511d9342010-06-30 06:39:15 +00001809 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1810 * and the Rx can be safely reset.
1811 */
1812 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1813 res &= 0x7f807f80;
1814 if ((res & 0xffff) == (res >> 16))
1815 return 1;
1816
1817 return 0;
1818}
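/* A note on the masking above: 0x7f807f80 keeps the same 8-bit field
 * position (0x7f80) in each 16-bit half of the register, so comparing
 * the low half (res & 0xffff) with the high half (res >> 16)
 * implements the bits 7-14 versus bits 23-30 check described in the
 * comment.
 */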
Kumar Gala0bbaf062005-06-20 10:54:21 -05001819
1820/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001821static void gfar_halt_nodisable(struct gfar_private *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822{
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001823 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 u32 tempval;
Claudiu Manoila4feee82014-10-07 10:44:34 +03001825 unsigned int timeout;
1826 int stopped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827
Claudiu Manoilefeddce2014-02-17 12:53:17 +02001828 gfar_ints_disable(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
Claudiu Manoila4feee82014-10-07 10:44:34 +03001830 if (gfar_is_dma_stopped(priv))
1831 return;
1832
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 /* Stop the DMA, and wait for it to stop */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001834 tempval = gfar_read(&regs->dmactrl);
Claudiu Manoila4feee82014-10-07 10:44:34 +03001835 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1836 gfar_write(&regs->dmactrl, tempval);
Anton Vorontsov511d9342010-06-30 06:39:15 +00001837
Claudiu Manoila4feee82014-10-07 10:44:34 +03001838retry:
1839 timeout = 1000;
1840 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1841 cpu_relax();
1842 timeout--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 }
Claudiu Manoila4feee82014-10-07 10:44:34 +03001844
1845 if (!timeout)
1846 stopped = gfar_is_dma_stopped(priv);
1847
1848 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1849 !__gfar_is_rx_idle(priv))
1850 goto retry;
Scott Woodd87eb122008-07-11 18:04:45 -05001851}
Scott Woodd87eb122008-07-11 18:04:45 -05001852
1853/* Halt the receive and transmit queues */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001854void gfar_halt(struct gfar_private *priv)
Scott Woodd87eb122008-07-11 18:04:45 -05001855{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001856 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Scott Woodd87eb122008-07-11 18:04:45 -05001857 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001859	/* Disable the Rx/Tx hw queues */
1860 gfar_write(&regs->rqueue, 0);
1861 gfar_write(&regs->tqueue, 0);
Scott Wood2a54adc2008-08-12 15:10:46 -05001862
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001863 mdelay(10);
1864
1865 gfar_halt_nodisable(priv);
1866
1867 /* Disable Rx/Tx DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 tempval = gfar_read(&regs->maccfg1);
1869 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1870 gfar_write(&regs->maccfg1, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001871}
1872
1873void stop_gfar(struct net_device *dev)
1874{
1875 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001876
Claudiu Manoil08511332014-02-24 12:13:45 +02001877 netif_tx_stop_all_queues(dev);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001878
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001879 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02001880 set_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001881 smp_mb__after_atomic();
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001882
Claudiu Manoil08511332014-02-24 12:13:45 +02001883 disable_napi(priv);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001884
Claudiu Manoil08511332014-02-24 12:13:45 +02001885 /* disable ints and gracefully shut down Rx/Tx DMA */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001886 gfar_halt(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
Claudiu Manoil08511332014-02-24 12:13:45 +02001888 phy_stop(priv->phydev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 free_skb_resources(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891}
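/* Note: the smp_mb__before_atomic()/smp_mb__after_atomic() pair above
 * (mirrored around clear_bit() in startup_gfar()) orders the
 * GFAR_DOWN update against the surrounding teardown steps as seen by
 * other CPUs polling the state bit.
 */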
1892
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001893static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 struct txbd8 *txbdp;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001896 struct gfar_private *priv = netdev_priv(tx_queue->dev);
Dai Haruki4669bc92008-12-17 16:51:04 -08001897 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001899 txbdp = tx_queue->tx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001901 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1902 if (!tx_queue->tx_skbuff[i])
Dai Haruki4669bc92008-12-17 16:51:04 -08001903 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
Claudiu Manoila7312d52015-03-13 10:36:28 +02001905 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1906 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
Dai Haruki4669bc92008-12-17 16:51:04 -08001907 txbdp->lstatus = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001908 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001909 j++) {
Dai Haruki4669bc92008-12-17 16:51:04 -08001910 txbdp++;
Claudiu Manoila7312d52015-03-13 10:36:28 +02001911 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1912 be16_to_cpu(txbdp->length),
1913 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 }
Andy Flemingad5da7a2008-05-07 13:20:55 -05001915 txbdp++;
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001916 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1917 tx_queue->tx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001919 kfree(tx_queue->tx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001920 tx_queue->tx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001921}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001923static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1924{
1925 struct rxbd8 *rxbdp;
1926 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1927 int i;
1928
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001929 rxbdp = rx_queue->rx_bd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001931 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1932 if (rx_queue->rx_skbuff[i]) {
Claudiu Manoila7312d52015-03-13 10:36:28 +02001933 dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
Claudiu Manoil369ec162013-02-14 05:00:02 +00001934 priv->rx_buffer_size,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001935 DMA_FROM_DEVICE);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001936 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1937 rx_queue->rx_skbuff[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 }
Anton Vorontsove69edd22009-10-12 06:00:30 +00001939 rxbdp->lstatus = 0;
1940 rxbdp->bufPtr = 0;
1941 rxbdp++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 }
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00001943 kfree(rx_queue->rx_skbuff);
Claudiu Manoil1eb8f7a2012-11-08 22:11:41 +00001944 rx_queue->rx_skbuff = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001945}
Anton Vorontsove69edd22009-10-12 06:00:30 +00001946
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001947/* If there are any tx skbs or rx skbs still around, free them.
Jan Ceuleers0977f812012-06-05 03:42:12 +00001948 * Then free tx_skbuff and rx_skbuff
1949 */
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001950static void free_skb_resources(struct gfar_private *priv)
1951{
1952 struct gfar_priv_tx_q *tx_queue = NULL;
1953 struct gfar_priv_rx_q *rx_queue = NULL;
1954 int i;
1955
1956 /* Go through all the buffer descriptors and free their data buffers */
1957 for (i = 0; i < priv->num_tx_queues; i++) {
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001958 struct netdev_queue *txq;
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001959
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001960 tx_queue = priv->tx_queue[i];
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001961 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001962 if (tx_queue->tx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001963 free_skb_tx_queue(tx_queue);
Paul Gortmakerd8a0f1b2012-01-06 13:51:03 -05001964 netdev_tx_reset_queue(txq);
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001965 }
1966
1967 for (i = 0; i < priv->num_rx_queues; i++) {
1968 rx_queue = priv->rx_queue[i];
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001969 if (rx_queue->rx_skbuff)
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00001970 free_skb_rx_queue(rx_queue);
1971 }
1972
Claudiu Manoil369ec162013-02-14 05:00:02 +00001973 dma_free_coherent(priv->dev,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00001974 sizeof(struct txbd8) * priv->total_tx_ring_size +
1975 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1976 priv->tx_queue[0]->tx_bd_base,
1977 priv->tx_queue[0]->tx_bd_dma_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978}
1979
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001980void gfar_start(struct gfar_private *priv)
Kumar Gala0bbaf062005-06-20 10:54:21 -05001981{
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001982 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001983 u32 tempval;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00001984 int i = 0;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001985
Claudiu Manoilc10650b2014-02-17 12:53:18 +02001986 /* Enable Rx/Tx hw queues */
1987 gfar_write(&regs->rqueue, priv->rqueue);
1988 gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001989
1990 /* Initialize DMACTRL to have WWR and WOP */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001991 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001992 tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001993 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001994
Kumar Gala0bbaf062005-06-20 10:54:21 -05001995 /* Make sure we aren't stopped */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001996 tempval = gfar_read(&regs->dmactrl);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001997 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00001998 gfar_write(&regs->dmactrl, tempval);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001999
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002000 for (i = 0; i < priv->num_grps; i++) {
2001 regs = priv->gfargrp[i].regs;
2002 /* Clear THLT/RHLT, so that the DMA starts polling now */
2003 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2004 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002005 }
Dai Haruki12dea572008-12-16 15:30:20 -08002006
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002007 /* Enable Rx/Tx DMA */
2008 tempval = gfar_read(&regs->maccfg1);
2009 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2010 gfar_write(&regs->maccfg1, tempval);
2011
Claudiu Manoilefeddce2014-02-17 12:53:17 +02002012 gfar_ints_enable(priv);
2013
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002014 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002015}
2016
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002017static void free_grp_irqs(struct gfar_priv_grp *grp)
2018{
2019 free_irq(gfar_irq(grp, TX)->irq, grp);
2020 free_irq(gfar_irq(grp, RX)->irq, grp);
2021 free_irq(gfar_irq(grp, ER)->irq, grp);
2022}
2023
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002024static int register_grp_irqs(struct gfar_priv_grp *grp)
2025{
2026 struct gfar_private *priv = grp->priv;
2027 struct net_device *dev = priv->ndev;
Anton Vorontsovccc05c62009-10-12 06:00:26 +00002028 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 /* If the device has multiple interrupts, register for
Jan Ceuleers0977f812012-06-05 03:42:12 +00002031	 * them. Otherwise, register only for the one combined interrupt
2032 */
Andy Flemingb31a1d82008-12-16 15:29:15 -08002033 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002034 /* Install our interrupt handlers for Error,
Jan Ceuleers0977f812012-06-05 03:42:12 +00002035 * Transmit, and Receive
2036 */
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002037 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2038 gfar_irq(grp, ER)->name, grp);
2039 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002040 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002041 gfar_irq(grp, ER)->irq);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002042
Julia Lawall2145f1a2010-08-05 10:26:20 +00002043 goto err_irq_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002045 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2046 gfar_irq(grp, TX)->name, grp);
2047 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002048 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002049 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 goto tx_irq_fail;
2051 }
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002052 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2053 gfar_irq(grp, RX)->name, grp);
2054 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002055 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002056 gfar_irq(grp, RX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 goto rx_irq_fail;
2058 }
2059 } else {
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002060 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2061 gfar_irq(grp, TX)->name, grp);
2062 if (err < 0) {
Joe Perches59deab22011-06-14 08:57:47 +00002063 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002064 gfar_irq(grp, TX)->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 goto err_irq_fail;
2066 }
2067 }
2068
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002069 return 0;
2070
2071rx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002072 free_irq(gfar_irq(grp, TX)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002073tx_irq_fail:
Claudiu Manoilee873fd2013-01-29 03:55:12 +00002074 free_irq(gfar_irq(grp, ER)->irq, grp);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002075err_irq_fail:
2076 return err;
2077
2078}
2079
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002080static void gfar_free_irq(struct gfar_private *priv)
2081{
2082 int i;
2083
2084 /* Free the IRQs */
2085 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2086 for (i = 0; i < priv->num_grps; i++)
2087 free_grp_irqs(&priv->gfargrp[i]);
2088 } else {
2089 for (i = 0; i < priv->num_grps; i++)
2090 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2091 &priv->gfargrp[i]);
2092 }
2093}
2094
2095static int gfar_request_irq(struct gfar_private *priv)
2096{
2097 int err, i, j;
2098
2099 for (i = 0; i < priv->num_grps; i++) {
2100 err = register_grp_irqs(&priv->gfargrp[i]);
2101 if (err) {
2102 for (j = 0; j < i; j++)
2103 free_grp_irqs(&priv->gfargrp[j]);
2104 return err;
2105 }
2106 }
2107
2108 return 0;
2109}
2110
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002111/* Bring the controller up and running */
2112int startup_gfar(struct net_device *ndev)
2113{
2114 struct gfar_private *priv = netdev_priv(ndev);
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002115 int err;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002116
Claudiu Manoila328ac92014-02-24 12:13:42 +02002117 gfar_mac_reset(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002118
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002119 err = gfar_alloc_skb_resources(ndev);
2120 if (err)
2121 return err;
2122
Claudiu Manoila328ac92014-02-24 12:13:42 +02002123 gfar_init_tx_rx_base(priv);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002124
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002125 smp_mb__before_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002126 clear_bit(GFAR_DOWN, &priv->state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002127 smp_mb__after_atomic();
Claudiu Manoil08511332014-02-24 12:13:45 +02002128
2129 /* Start Rx/Tx DMA and enable the interrupts */
Claudiu Manoilc10650b2014-02-17 12:53:18 +02002130 gfar_start(priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
Anton Vorontsov826aa4a2009-10-12 06:00:34 +00002132 phy_start(priv->phydev);
2133
Claudiu Manoil08511332014-02-24 12:13:45 +02002134 enable_napi(priv);
2135
2136 netif_tx_wake_all_queues(ndev);
2137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139}
2140
Jan Ceuleers0977f812012-06-05 03:42:12 +00002141/* Called when something needs to use the ethernet device
2142 * Returns 0 for success.
2143 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144static int gfar_enet_open(struct net_device *dev)
2145{
Li Yang94e8cc32007-10-12 21:53:51 +08002146 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 int err;
2148
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 err = init_phy(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002150 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 return err;
2152
Claudiu Manoil80ec3962014-02-24 12:13:44 +02002153 err = gfar_request_irq(priv);
2154 if (err)
2155 return err;
2156
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 err = startup_gfar(dev);
Claudiu Manoil08511332014-02-24 12:13:45 +02002158 if (err)
Anton Vorontsovdb0e8e32007-10-17 23:57:46 +04002159 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Anton Vorontsov2884e5c2009-02-01 00:52:34 -08002161 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 return err;
2164}
2165
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002166static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002167{
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002168 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
Kumar Gala6c31d552009-04-28 08:04:10 -07002169
2170 memset(fcb, 0, GMAC_FCB_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002171
Kumar Gala0bbaf062005-06-20 10:54:21 -05002172 return fcb;
2173}
2174
Manfred Rudigier9c4886e2012-01-09 23:26:51 +00002175static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002176 int fcb_length)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002177{
Kumar Gala0bbaf062005-06-20 10:54:21 -05002178	/* If we're here, it's an IP packet with a TCP or UDP
2179 * payload. We set it to checksum, using a pseudo-header
2180 * we provide
2181 */
Jan Ceuleers3a2e16c2012-06-05 03:42:14 +00002182 u8 flags = TXFCB_DEFAULT;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002183
Jan Ceuleers0977f812012-06-05 03:42:12 +00002184	/* Tell the controller what the protocol is,
2185	 * and provide the already calculated phcs
2186 */
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002187 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06002188 flags |= TXFCB_UDP;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002189 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
Andy Fleming7f7f5312005-11-11 12:38:59 -06002190 } else
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002191 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002192
2193 /* l3os is the distance between the start of the
2194 * frame (skb->data) and the start of the IP hdr.
2195 * l4os is the distance between the start of the
Jan Ceuleers0977f812012-06-05 03:42:12 +00002196 * l3 hdr and the l4 hdr
2197 */
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002198 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03002199 fcb->l4os = skb_network_header_len(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05002200
Andy Fleming7f7f5312005-11-11 12:38:59 -06002201 fcb->flags = flags;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002202}
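/* Worked example (a sketch, assuming an untagged IPv4/TCP frame with
 * a plain 8-byte FCB): after gfar_add_fcb() the network header is at
 * offset ETH_HLEN + GMAC_FCB_LEN = 22 from skb->data, so
 * l3os = 22 - 8 = 14 (the Ethernet header), and l4os = 20 for an
 * IPv4 header without options.
 */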
2203
Andy Fleming7f7f5312005-11-11 12:38:59 -06002204inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala0bbaf062005-06-20 10:54:21 -05002205{
Andy Fleming7f7f5312005-11-11 12:38:59 -06002206 fcb->flags |= TXFCB_VLN;
Claudiu Manoil26eb9372015-03-13 10:36:29 +02002207 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
Kumar Gala0bbaf062005-06-20 10:54:21 -05002208}
2209
Dai Haruki4669bc92008-12-17 16:51:04 -08002210static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002211 struct txbd8 *base, int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002212{
2213 struct txbd8 *new_bd = bdp + stride;
2214
2215 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2216}
2217
2218static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
Jan Ceuleersbc4598b2012-06-05 03:42:13 +00002219 int ring_size)
Dai Haruki4669bc92008-12-17 16:51:04 -08002220{
2221 return skip_txbd(bdp, 1, base, ring_size);
2222}
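/* Example of the wrap behaviour: with ring_size = 256 and bdp at
 * base + 255, skip_txbd(bdp, 1, base, 256) computes base + 256,
 * sees it is past the end of the ring, and returns base -- i.e. the
 * ring wraps back to the first descriptor.
 */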
2223
Claudiu Manoil02d88fb2013-08-05 17:20:09 +03002224/* eTSEC12: csum generation not supported for some fcb offsets */
2225static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2226 unsigned long fcb_addr)
2227{
2228 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2229 (fcb_addr % 0x20) > 0x18);
2230}
2231
2232/* eTSEC76: csum generation for frames larger than 2500 may
2233 * cause excess delays before start of transmission
2234 */
2235static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2236 unsigned int len)
2237{
2238 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2239 (len > 2500));
2240}
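/* Both helpers are consulted per frame in gfar_start_xmit() below;
 * when either condition hits, the driver strips the FCB and falls
 * back to software checksumming via skb_checksum_help().
 */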
2241
Jan Ceuleers0977f812012-06-05 03:42:12 +00002242/* This is called by the kernel when a frame is ready for transmission.
2243 * It is pointed to by the dev->hard_start_xmit function pointer
2244 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2246{
2247 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002248 struct gfar_priv_tx_q *tx_queue = NULL;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002249 struct netdev_queue *txq;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00002250 struct gfar __iomem *regs = NULL;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002251 struct txfcb *fcb = NULL;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002252 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
Dai Haruki5a5efed2008-12-16 15:34:50 -08002253 u32 lstatus;
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002254 int i, rq = 0;
2255 int do_tstamp, do_csum, do_vlan;
Dai Haruki4669bc92008-12-17 16:51:04 -08002256 u32 bufaddr;
Claudiu Manoil50ad0762013-08-30 15:01:15 +03002257 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +00002258
2259 rq = skb->queue_mapping;
2260 tx_queue = priv->tx_queue[rq];
2261 txq = netdev_get_tx_queue(dev, rq);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +00002262 base = tx_queue->tx_bd_base;
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00002263 regs = tx_queue->grp->regs;
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002264
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002265 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002266 do_vlan = skb_vlan_tag_present(skb);
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002267 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2268 priv->hwts_tx_en;
2269
2270 if (do_csum || do_vlan)
2271 fcb_len = GMAC_FCB_LEN;
2272
Manfred Rudigierf0ee7ac2010-04-08 23:10:35 +00002273 /* check if time stamp should be generated */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002274 if (unlikely(do_tstamp))
2275 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
Dai Haruki4669bc92008-12-17 16:51:04 -08002276
Li Yang5b28bea2009-03-27 15:54:30 -07002277 /* make space for additional header when fcb is needed */
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002278 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002279 struct sk_buff *skb_new;
2280
Claudiu Manoil0d0cffd2013-08-05 17:20:10 +03002281 skb_new = skb_realloc_headroom(skb, fcb_len);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002282 if (!skb_new) {
2283 dev->stats.tx_errors++;
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002284 dev_kfree_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002285 return NETDEV_TX_OK;
2286 }
Manfred Rudigierdb83d132012-01-09 23:26:50 +00002287
Eric Dumazet313b0372012-07-05 11:45:13 +00002288 if (skb->sk)
2289 skb_set_owner_w(skb_new, skb->sk);
Eric W. Biedermanc9974ad2014-03-11 14:20:26 -07002290 dev_consume_skb_any(skb);
Stephen Hemminger54dc79f2009-03-27 00:38:45 -07002291 skb = skb_new;
2292 }
2293
Dai Haruki4669bc92008-12-17 16:51:04 -08002294 /* total number of fragments in the SKB */
2295 nr_frags = skb_shinfo(skb)->nr_frags;
2296
	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp)) {
			u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
			txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		} else {
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
		}
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   frag_len,
						   DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = be32_to_cpu(txbdp_start->lstatus);
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* If time stamping is requested, one additional TxBD must be set up.
	 * The first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN.  The second TxBD points to the actual frame data
	 * with the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;
		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

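	/* Make sure all the BD and FCB updates above are visible to the
	 * controller before TXBD_READY is set on the start BD below.
	 */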
	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree.  Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

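	/* DMA mapping failed: walk the BDs following the head BD and
	 * release every fragment mapping completed before the failure
	 * (a mapped fragment is recognizable by its TXBD_READY flag).
	 */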
dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	gfar_wmb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	gfar_free_irq(priv);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

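/* Full device reset: spin until we own the GFAR_RESETTING bit (the same
 * bit serializes MTU changes and other reconfigurations), then bring
 * the controller down and back up.
 */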
void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}

/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

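	/* Walk the ring from dirty_tx, inspecting the *last* BD of each
	 * frame: a frame is only reclaimable once the controller has
	 * cleared TXBD_READY on that final descriptor.
	 */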
	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = gfar_alloc_skb(dev);
	if (!skb)
		return NULL;

	addr = dma_map_single(priv->dev, skb->data,
			      priv->rx_buffer_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	*bufaddr = addr;
	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_crc_errors++;
	}
}

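/* Rx interrupt: mask further Rx interrupts for this group and hand the
 * work to NAPI; the poll routines below restore IMASK once the rings
 * have been drained under budget.
 */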
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, leave the
	 * checksum state unverified (CHECKSUM_NONE).
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

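/* Rx buffer layout as consumed below: an optional Rx FCB first
 * (amount_pull bytes), then, when Rx timestamping is enabled, an
 * 8-byte hardware timestamp, then any remaining padding, and finally
 * the Ethernet frame itself.
 */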
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled.
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

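	/* A BD whose RXBD_EMPTY flag is set still belongs to the
	 * controller; process descriptors until we reach one, or until
	 * the budget runs out.  Every consumed buffer is replaced with a
	 * freshly mapped skb right away, so the ring stays populated.
	 */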
	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
		struct sk_buff *newskb;
		dma_addr_t bufaddr;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev, &bufaddr);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
			bdp->status = cpu_to_be16(RXBD_LARGE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb ||
			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
			count_errors(be16_to_cpu(bdp->status), dev);

			if (unlikely(!newskb)) {
				newskb = skb;
				bufaddr = be32_to_cpu(bdp->bufPtr);
			} else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = be16_to_cpu(bdp->length) -
					  ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi_rx);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_init_rxbdp(rx_queue, bdp, bufaddr);

		/* Update Last Free RxBD pointer for LFC */
		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
			gfar_write(rx_queue->rfbptr, (u32)bdp);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

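/* The *_sq variants below appear to serve the single-queue-per-group
 * case: they poll the group's one Rx or Tx queue directly, skipping
 * the per-queue bitmap scan done by gfar_poll_rx()/gfar_poll_tx().
 */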
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

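/* Tx cleanup is bounded by the ring size rather than by the NAPI
 * budget, so it runs to completion and reports zero work done, leaving
 * the whole budget to Rx polling.
 */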
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget / num_act_queues;

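	/* Only queues whose RXF bit is set in RSTAT have pending frames;
	 * give each active queue an equal share of the budget.
	 */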
	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;
		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table.  The table is controlled through 8 32-bit registers:
 *    gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255.  This means that the 3 most significant bits of the
 *    hash index select which gaddr register to use, and the other
 *    5 bits indicate which bit (assuming an IBM numbering scheme,
 *    which for PowerPC (tm) is usually the case) in the register
 *    holds the entry.
 */
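/* Worked example (hypothetical CRC value, 256-entry table, so
 * width == 8): if ether_crc() returns 0xa3000000, the top byte is
 * 0xa3 = 0b10100011, giving whichreg = 0xa3 >> 5 = 5 (gaddr5) and
 * whichbit = 0xa3 & 0x1f = 3, so the routine sets bit 3 counting
 * from the MSB: tempval |= 1 << (31 - 3).
 */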
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3327{
3328 u32 tempval;
3329 struct gfar_private *priv = netdev_priv(dev);
Joe Perches6a3c910c2011-11-16 09:38:02 +00003330 u32 result = ether_crc(ETH_ALEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05003331 int width = priv->hash_width;
3332 u8 whichbit = (result >> (32 - width)) & 0x1f;
3333 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 u32 value = (1 << (31-whichbit));
3335
Kumar Gala0bbaf062005-06-20 10:54:21 -05003336 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05003338 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339}
3340
Andy Fleming7f7f5312005-11-11 12:38:59 -06003341
3342/* There are multiple MAC Address register pairs on some controllers
3343 * This function sets the numth pair to a given address
3344 */
Joe Perchesb6bc7652010-12-21 02:16:08 -08003345static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3346 const u8 *addr)
Andy Fleming7f7f5312005-11-11 12:38:59 -06003347{
3348 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +00003349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003350 u32 tempval;
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003351 u32 __iomem *macptr = &regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06003352
3353 macptr += num*2;
3354
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003355 /* For a station address of 0x12345678ABCD in transmission
3356 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3357 * MACnADDR2 is set to 0x34120000.
Jan Ceuleers0977f812012-06-05 03:42:12 +00003358 */
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003359 tempval = (addr[5] << 24) | (addr[4] << 16) |
3360 (addr[3] << 8) | addr[2];
Andy Fleming7f7f5312005-11-11 12:38:59 -06003361
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003362 gfar_write(macptr, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003363
Claudiu Manoil83bfc3c2014-10-07 10:44:33 +03003364 tempval = (addr[1] << 24) | (addr[0] << 16);
Andy Fleming7f7f5312005-11-11 12:38:59 -06003365
3366 gfar_write(macptr+1, tempval);
3367}
3368
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369/* GFAR error interrupt handler */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003370static irqreturn_t gfar_error(int irq, void *grp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371{
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003372 struct gfar_priv_grp *gfargrp = grp_id;
3373 struct gfar __iomem *regs = gfargrp->regs;
3374 struct gfar_private *priv= gfargrp->priv;
3375 struct net_device *dev = priv->ndev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376
3377 /* Save ievent for future reference */
Sandeep Gopalpetf4983702009-11-02 07:03:09 +00003378 u32 events = gfar_read(&regs->ievent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Report the error details if error messaging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			/* A FIFO underrun requires a full device
			 * restart; defer it to process context.
			 */
			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		/* BSY: a frame arrived but no RX buffer was free.
		 * Kick the receive path to reclaim descriptors.
		 */
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

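/* The error handler above (gfar_error()) runs in hard-IRQ context and
 * always returns IRQ_HANDLED. A minimal sketch of how such a handler is
 * wired up, assuming the per-group error interrupt line is already
 * known; example_wire_error_irq() is hypothetical, not the driver's
 * actual setup path.
 */
static int example_wire_error_irq(struct gfar_priv_grp *grp, int err_irq)
{
	/* Pass the interrupt group as dev_id so the handler can recover
	 * its register block and private state through grp_id.
	 */
	return request_irq(err_irq, gfar_error, 0, "gfar_error", grp);
}
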
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	/* 802.3x flow control only applies to full-duplex links */
	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		/* pause autoneg disabled: apply the manual settings */
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		/* get local capabilities */
		lcl_adv = 0;
		if (phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		/* resolve both ends' advertisements per IEEE 802.3 */
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}

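/* For reference, a sketch of the pause resolution performed by
 * mii_resolve_flowctrl_fdx() (see include/linux/mii.h for the
 * authoritative version; example_resolve_flowctrl_fdx is a hypothetical
 * stand-in): symmetric pause is used when both ends advertise it;
 * otherwise one-directional pause is used when both ends advertise
 * asymmetric pause and exactly one end also advertises symmetric pause,
 * which fixes the direction.
 */
static inline u8 example_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
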
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;
	struct rxbd8 *bdp;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		/* The Tx flow control bit lives in maccfg1 (tempval1),
		 * not maccfg2, so sample the old value from there.
		 */
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				rx_queue = priv->rx_queue[i];
				bdp = rx_queue->cur_rx;
				/* skip to previous bd */
				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
					      rx_queue->rx_bd_base,
					      rx_queue->rx_ring_size);

				if (rx_queue->rfbptr)
					gfar_write(rx_queue->rfbptr, (u32)bdp);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}

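/* skip_bd() above comes from gianfar.h; a minimal sketch of the idea,
 * assuming the same ring layout (example_skip_bd is hypothetical and
 * the exact helper may differ): advance a descriptor pointer by
 * `stride` entries, wrapping at the end of the ring. Stepping by
 * ring_size - 1 therefore lands on the previous descriptor, which is
 * how gfar_update_link_state() points rfbptr at the last free buffer
 * descriptor.
 */
static inline struct rxbd8 *example_skip_bd(struct rxbd8 *bdp, int stride,
					    struct rxbd8 *base, int ring_size)
{
	struct rxbd8 *new_bd = bdp + stride;

	if (new_bd >= (base + ring_size))
		new_bd -= ring_size;

	return new_bd;
}
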
static const struct of_device_id gfar_match[] = {
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

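/* A minimal sketch (example_is_etsec2 is a hypothetical helper, not
 * part of this driver) of how probe code can tell the two supported
 * bindings apart; the "fsl,etsec2" binding describes the multi-group
 * eTSEC2 controllers, while the original "gianfar" binding describes
 * the single-group ones.
 */
static bool example_is_etsec2(struct device_node *np)
{
	return of_device_is_compatible(np, "fsl,etsec2");
}
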
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);
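
/* For reference, module_platform_driver() saves the init/exit
 * boilerplate; the macro (see <linux/platform_device.h>) expands to
 * roughly the following:
 */
static int __init gfar_driver_init(void)
{
	return platform_driver_register(&gfar_driver);
}
module_init(gfar_driver_init);

static void __exit gfar_driver_exit(void)
{
	platform_driver_unregister(&gfar_driver);
}
module_exit(gfar_driver_exit);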