/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or a set amount of time has passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
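
/* Illustrative sketch only, not driver code: the Rx path described
 * above boils down to a loop of roughly this shape, with all names
 * here schematic:
 *
 *	while (!(bdp->status & RXBD_EMPTY) && work_done < budget) {
 *		hand the skb attached to this descriptor to the stack;
 *		attach a freshly allocated skb to the descriptor;
 *		mark the descriptor empty again for the controller;
 *		advance bdp, wrapping back to the base at the
 *		descriptor whose RXBD_WRAP bit is set;
 *		work_done++;
 *	}
 */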

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);
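	/* eieio() is a PowerPC store barrier: make sure the buffer
	 * pointer written above is visible before lstatus marks the
	 * descriptor empty and hands it back to the controller.
	 */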
	eieio();

	bdp->lstatus = lstatus;
}

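/* (Re)initialize the Tx and Rx descriptor rings. Rx descriptors that
 * already have an skb attached (e.g. across a reset) are reused; the
 * rest get freshly allocated skbs.
 */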
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

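	/* The per-queue TBASEn/RBASEn registers are laid out 8 bytes
	 * apart, which is why baddr advances by two u32s per queue.
	 */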
	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

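	/* Round the frame size up to an INCREMENTAL_BUFFER_SIZE
	 * boundary: mask down to the boundary, then add one full
	 * increment, so an exact multiple still gains headroom.
	 */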
	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

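	/* Pick the Tx scheduling discipline: strict priority if
	 * enabled, otherwise weighted round-robin with equal weights
	 * for all queues.
	 */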
	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

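/* ndo_get_stats: fold the per-queue Rx/Tx counters into the single
 * net_device_stats structure the stack expects.
 */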
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

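/* Parse one register-block "group" node from the device tree: map its
 * registers, pick up its interrupts, and attach the Rx/Tx queues it
 * serves according to the fsl,rx/tx-bit-map properties (or defaults).
 */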
| 635 | static int gfar_parse_group(struct device_node *np, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 636 | struct gfar_private *priv, const char *model) |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 637 | { |
Claudiu Manoil | 5fedcc1 | 2013-01-29 03:55:11 +0000 | [diff] [blame] | 638 | struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 639 | u32 *queue_mask; |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 640 | int i; |
| 641 | |
Paul Gortmaker | 7c1e7e9 | 2013-02-04 09:49:42 +0000 | [diff] [blame] | 642 | for (i = 0; i < GFAR_NUM_IRQS; i++) { |
| 643 | grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), |
| 644 | GFP_KERNEL); |
| 645 | if (!grp->irqinfo[i]) |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 646 | return -ENOMEM; |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 647 | } |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 648 | |
Claudiu Manoil | 5fedcc1 | 2013-01-29 03:55:11 +0000 | [diff] [blame] | 649 | grp->regs = of_iomap(np, 0); |
| 650 | if (!grp->regs) |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 651 | return -ENOMEM; |
| 652 | |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 653 | gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 654 | |
| 655 | /* If we aren't the FEC we have multiple interrupts */ |
| 656 | if (model && strcasecmp(model, "FEC")) { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 657 | gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); |
| 658 | gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); |
| 659 | if (gfar_irq(grp, TX)->irq == NO_IRQ || |
| 660 | gfar_irq(grp, RX)->irq == NO_IRQ || |
| 661 | gfar_irq(grp, ER)->irq == NO_IRQ) |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 662 | return -EINVAL; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 663 | } |
| 664 | |
Claudiu Manoil | 5fedcc1 | 2013-01-29 03:55:11 +0000 | [diff] [blame] | 665 | grp->priv = priv; |
| 666 | spin_lock_init(&grp->grplock); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 667 | if (priv->mode == MQ_MG_MODE) { |
| 668 | queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); |
Claudiu Manoil | 5fedcc1 | 2013-01-29 03:55:11 +0000 | [diff] [blame] | 669 | grp->rx_bit_map = queue_mask ? |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 670 | *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); |
| 671 | queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); |
Claudiu Manoil | 5fedcc1 | 2013-01-29 03:55:11 +0000 | [diff] [blame] | 672 | grp->tx_bit_map = queue_mask ? |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 673 | *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 674 | } else { |
Claudiu Manoil | 5fedcc1 | 2013-01-29 03:55:11 +0000 | [diff] [blame] | 675 | grp->rx_bit_map = 0xFF; |
| 676 | grp->tx_bit_map = 0xFF; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 677 | } |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 678 | |
| 679 | /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses |
| 680 | * right to left, so we need to revert the 8 bits to get the q index |
| 681 | */ |
| 682 | grp->rx_bit_map = bitrev8(grp->rx_bit_map); |
| 683 | grp->tx_bit_map = bitrev8(grp->tx_bit_map); |
| 684 | |
| 685 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, |
| 686 | * also assign queues to groups |
| 687 | */ |
| 688 | for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { |
| 689 | grp->num_rx_queues++; |
| 690 | grp->rstat |= (RSTAT_CLEAR_RHALT >> i); |
| 691 | priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); |
| 692 | priv->rx_queue[i]->grp = grp; |
| 693 | } |
| 694 | |
| 695 | for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { |
| 696 | grp->num_tx_queues++; |
| 697 | grp->tstat |= (TSTAT_CLEAR_THALT >> i); |
| 698 | priv->tqueue |= (TQUEUE_EN0 >> i); |
| 699 | priv->tx_queue[i]->grp = grp; |
| 700 | } |
| 701 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 702 | priv->num_grps++; |
| 703 | |
| 704 | return 0; |
| 705 | } |

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the number of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

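/* Write one "cluster" of four filer entries matching the given traffic
 * class, filling the filer table from the highest index (rqfar)
 * downward; returns the next free index below the entries just written.
 */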
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

| 976 | static void gfar_init_filer_table(struct gfar_private *priv) |
| 977 | { |
| 978 | int i = 0x0; |
| 979 | u32 rqfar = MAX_FILER_IDX; |
| 980 | u32 rqfcr = 0x0; |
| 981 | u32 rqfpr = FPR_FILER_MASK; |
| 982 | |
| 983 | /* Default rule */ |
| 984 | rqfcr = RQFCR_CMP_MATCH; |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 985 | priv->ftp_rqfcr[rqfar] = rqfcr; |
| 986 | priv->ftp_rqfpr[rqfar] = rqfpr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 987 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
| 988 | |
| 989 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); |
| 990 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); |
| 991 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); |
| 992 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); |
| 993 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); |
| 994 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); |
| 995 | |
Uwe Kleine-König | 85dd08e | 2010-06-11 12:16:55 +0200 | [diff] [blame] | 996 | /* cur_filer_idx indicates the first non-masked rule */
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 997 | priv->cur_filer_idx = rqfar; |
| 998 | |
| 999 | /* Rest are masked rules */ |
| 1000 | rqfcr = RQFCR_CMP_NOMATCH; |
| 1001 | for (i = 0; i < rqfar; i++) { |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1002 | priv->ftp_rqfcr[i] = rqfcr; |
| 1003 | priv->ftp_rqfpr[i] = rqfpr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1004 | gfar_write_filer(priv, i, rqfcr, rqfpr); |
| 1005 | } |
| 1006 | } |
| 1007 | |
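Taken together, gfar_init_filer_table() consumes one entry for the default rule at MAX_FILER_IDX and four entries per protocol class, filling downward. A stand-alone sketch of the index arithmetic; the MAX_FILER_IDX value is assumed to match gianfar.h.

#include <stdio.h>

#define MAX_FILER_IDX 0xFF	/* assumed value from gianfar.h */

int main(void)
{
	unsigned int rqfar = MAX_FILER_IDX;	/* the default rule stays here */
	int c;

	/* six classes: IPv6/IPv4, each plain, +UDP, +TCP */
	for (c = 0; c < 6; c++)
		rqfar -= 4;	/* each cluster_entry_per_class() call uses 4 entries */

	printf("cur_filer_idx = %u; entries 0..%u are masked\n",
	       rqfar, rqfar - 1);
	return 0;
}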
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1008 | static void __gfar_detect_errata_83xx(struct gfar_private *priv) |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1009 | { |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1010 | unsigned int pvr = mfspr(SPRN_PVR); |
| 1011 | unsigned int svr = mfspr(SPRN_SVR); |
| 1012 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ |
| 1013 | unsigned int rev = svr & 0xffff; |
| 1014 | |
| 1015 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ |
| 1016 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1017 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1018 | priv->errata |= GFAR_ERRATA_74; |
| 1019 | |
Anton Vorontsov | deb90ea | 2010-06-30 06:39:13 +0000 | [diff] [blame] | 1020 | /* MPC8313 and MPC837x, all revisions */
| 1021 | if ((pvr == 0x80850010 && mod == 0x80b0) || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1022 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
Anton Vorontsov | deb90ea | 2010-06-30 06:39:13 +0000 | [diff] [blame] | 1023 | priv->errata |= GFAR_ERRATA_76; |
| 1024 | |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1025 | /* MPC8313 Rev < 2.0 */ |
| 1026 | if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 1027 | priv->errata |= GFAR_ERRATA_12; |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1028 | } |
| 1029 | |
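The 83xx detection above keys off the PVR and SVR fields. The stand-alone sketch below decodes a hypothetical SVR the same way; the sample value is made up, and the PVR match (0x80850010 on the MPC8313 path above) is folded into a comment rather than checked.

#include <stdio.h>

int main(void)
{
	unsigned int svr = 0x80b00021;		 /* hypothetical MPC8313E-style SVR */
	unsigned int mod = (svr >> 16) & 0xfff6; /* part number, E-suffix bit stripped */
	unsigned int rev = svr & 0xffff;	 /* silicon revision; 0x0020 == rev 2.0 */

	printf("mod=0x%04x rev=0x%04x -> errata 74 %s (given a matching PVR)\n",
	       mod, rev,
	       (mod == 0x80b0 && rev >= 0x0020) ? "applies" : "does not apply");
	return 0;
}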
| 1030 | static void __gfar_detect_errata_85xx(struct gfar_private *priv) |
| 1031 | { |
| 1032 | unsigned int svr = mfspr(SPRN_SVR); |
| 1033 | |
| 1034 | if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) |
| 1035 | priv->errata |= GFAR_ERRATA_12; |
Claudiu Manoil | 53fad77 | 2013-10-09 20:20:42 +0300 | [diff] [blame] | 1036 | if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || |
| 1037 | ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) |
| 1038 | priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1039 | } |
| 1040 | |
| 1041 | static void gfar_detect_errata(struct gfar_private *priv) |
| 1042 | { |
| 1043 | struct device *dev = &priv->ofdev->dev; |
| 1044 | |
| 1045 | /* no plans to fix */ |
| 1046 | priv->errata |= GFAR_ERRATA_A002; |
| 1047 | |
| 1048 | if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) |
| 1049 | __gfar_detect_errata_85xx(priv); |
| 1050 | else /* non-mpc85xx parts, i.e. e300 core based */ |
| 1051 | __gfar_detect_errata_83xx(priv); |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 1052 | |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1053 | if (priv->errata) |
| 1054 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", |
| 1055 | priv->errata); |
| 1056 | } |
| 1057 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1058 | void gfar_mac_reset(struct gfar_private *priv) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | { |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1060 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1061 | u32 tempval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | |
| 1063 | /* Reset MAC layer */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1064 | gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | |
Andy Fleming | b98ac70 | 2009-02-04 16:38:05 -0800 | [diff] [blame] | 1066 | /* We need to delay at least 3 TX clocks */ |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1067 | udelay(3); |
Andy Fleming | b98ac70 | 2009-02-04 16:38:05 -0800 | [diff] [blame] | 1068 | |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 1069 | /* the soft reset bit is not self-resetting, so we need to |
| 1070 | * clear it before resuming normal operation |
| 1071 | */ |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1072 | gfar_write(®s->maccfg1, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1074 | udelay(3); |
| 1075 | |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 1076 | /* Compute rx_buff_size based on config flags */ |
| 1077 | gfar_rx_buff_size_config(priv); |
| 1078 | |
| 1079 | /* Initialize the max receive frame/buffer lengths */ |
| 1080 | gfar_write(®s->maxfrm, priv->rx_buffer_size); |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1081 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
| 1082 | |
| 1083 | /* Initialize the Minimum Frame Length Register */ |
| 1084 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); |
| 1085 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 | /* Initialize MACCFG2. */ |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1087 | tempval = MACCFG2_INIT_SETTINGS; |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 1088 | |
| 1089 | /* If the MTU is larger than the max size for standard
| 1090 | * ethernet frames (i.e., a jumbo frame), then set maccfg2
| 1091 | * to allow huge frames, and to check the length
| 1092 | */
| 1093 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || |
| 1094 | gfar_has_errata(priv, GFAR_ERRATA_74)) |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1095 | tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 1096 | |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1097 | gfar_write(®s->maccfg2, tempval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1099 | /* Clear mac addr hash registers */ |
| 1100 | gfar_write(®s->igaddr0, 0); |
| 1101 | gfar_write(®s->igaddr1, 0); |
| 1102 | gfar_write(®s->igaddr2, 0); |
| 1103 | gfar_write(®s->igaddr3, 0); |
| 1104 | gfar_write(®s->igaddr4, 0); |
| 1105 | gfar_write(®s->igaddr5, 0); |
| 1106 | gfar_write(®s->igaddr6, 0); |
| 1107 | gfar_write(®s->igaddr7, 0); |
| 1108 | |
| 1109 | gfar_write(®s->gaddr0, 0); |
| 1110 | gfar_write(®s->gaddr1, 0); |
| 1111 | gfar_write(®s->gaddr2, 0); |
| 1112 | gfar_write(®s->gaddr3, 0); |
| 1113 | gfar_write(®s->gaddr4, 0); |
| 1114 | gfar_write(®s->gaddr5, 0); |
| 1115 | gfar_write(®s->gaddr6, 0); |
| 1116 | gfar_write(®s->gaddr7, 0); |
| 1117 | |
| 1118 | if (priv->extended_hash) |
| 1119 | gfar_clear_exact_match(priv->ndev); |
| 1120 | |
| 1121 | gfar_mac_rx_config(priv); |
| 1122 | |
| 1123 | gfar_mac_tx_config(priv); |
| 1124 | |
| 1125 | gfar_set_mac_address(priv->ndev); |
| 1126 | |
| 1127 | gfar_set_multi(priv->ndev); |
| 1128 | |
| 1129 | /* clear ievent and imask before configuring coalescing */ |
| 1130 | gfar_ints_disable(priv); |
| 1131 | |
| 1132 | /* Configure the coalescing support */ |
| 1133 | gfar_configure_coalescing_all(priv); |
| 1134 | } |
| 1135 | |
| 1136 | static void gfar_hw_init(struct gfar_private *priv) |
| 1137 | { |
| 1138 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 1139 | u32 attrs; |
| 1140 | |
| 1141 | /* Stop the DMA engine now, in case it was running before |
| 1142 | * (The firmware could have used it, and left it running). |
| 1143 | */ |
| 1144 | gfar_halt(priv); |
| 1145 | |
| 1146 | gfar_mac_reset(priv); |
| 1147 | |
| 1148 | /* Zero out the rmon mib registers if the device has them */
| 1149 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
| 1150 | memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); |
| 1151 | |
| 1152 | /* Mask off the CAM interrupts */ |
| 1153 | gfar_write(®s->rmon.cam1, 0xffffffff); |
| 1154 | gfar_write(®s->rmon.cam2, 0xffffffff); |
| 1155 | } |
| 1156 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | /* Initialize ECNTRL */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1158 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | |
Claudiu Manoil | 34018fd | 2014-02-17 12:53:15 +0200 | [diff] [blame] | 1160 | /* Set the extraction length and index */ |
| 1161 | attrs = ATTRELI_EL(priv->rx_stash_size) | |
| 1162 | ATTRELI_EI(priv->rx_stash_index); |
| 1163 | |
| 1164 | gfar_write(®s->attreli, attrs); |
| 1165 | |
| 1166 | /* Start with defaults, and add stashing |
| 1167 | * depending on driver parameters |
| 1168 | */ |
| 1169 | attrs = ATTR_INIT_SETTINGS; |
| 1170 | |
| 1171 | if (priv->bd_stash_en) |
| 1172 | attrs |= ATTR_BDSTASH; |
| 1173 | |
| 1174 | if (priv->rx_stash_size != 0) |
| 1175 | attrs |= ATTR_BUFSTASH; |
| 1176 | |
| 1177 | gfar_write(®s->attr, attrs); |
| 1178 | |
| 1179 | /* FIFO configs */ |
| 1180 | gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); |
| 1181 | gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); |
| 1182 | gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); |
| 1183 | |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1184 | /* Program the interrupt steering regs, only for MG devices */ |
| 1185 | if (priv->num_grps > 1) |
| 1186 | gfar_write_isrg(priv); |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1187 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1189 | static void __init gfar_init_addr_hash_table(struct gfar_private *priv) |
| 1190 | { |
| 1191 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1192 | |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1193 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1194 | priv->extended_hash = 1; |
| 1195 | priv->hash_width = 9; |
| 1196 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1197 | priv->hash_regs[0] = ®s->igaddr0; |
| 1198 | priv->hash_regs[1] = ®s->igaddr1; |
| 1199 | priv->hash_regs[2] = ®s->igaddr2; |
| 1200 | priv->hash_regs[3] = ®s->igaddr3; |
| 1201 | priv->hash_regs[4] = ®s->igaddr4; |
| 1202 | priv->hash_regs[5] = ®s->igaddr5; |
| 1203 | priv->hash_regs[6] = ®s->igaddr6; |
| 1204 | priv->hash_regs[7] = ®s->igaddr7; |
| 1205 | priv->hash_regs[8] = ®s->gaddr0; |
| 1206 | priv->hash_regs[9] = ®s->gaddr1; |
| 1207 | priv->hash_regs[10] = ®s->gaddr2; |
| 1208 | priv->hash_regs[11] = ®s->gaddr3; |
| 1209 | priv->hash_regs[12] = ®s->gaddr4; |
| 1210 | priv->hash_regs[13] = ®s->gaddr5; |
| 1211 | priv->hash_regs[14] = ®s->gaddr6; |
| 1212 | priv->hash_regs[15] = ®s->gaddr7; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1213 | |
| 1214 | } else { |
| 1215 | priv->extended_hash = 0; |
| 1216 | priv->hash_width = 8; |
| 1217 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1218 | priv->hash_regs[0] = ®s->gaddr0; |
| 1219 | priv->hash_regs[1] = ®s->gaddr1; |
| 1220 | priv->hash_regs[2] = ®s->gaddr2; |
| 1221 | priv->hash_regs[3] = ®s->gaddr3; |
| 1222 | priv->hash_regs[4] = ®s->gaddr4; |
| 1223 | priv->hash_regs[5] = ®s->gaddr5; |
| 1224 | priv->hash_regs[6] = ®s->gaddr6; |
| 1225 | priv->hash_regs[7] = ®s->gaddr7; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1226 | } |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1227 | } |
| 1228 | |
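The hash registers collected above are consumed when multicast addresses are added: elsewhere in this file, gfar_set_hash_for_addr() derives a (register, bit) pair from the CRC of the address. A sketch of that mapping, assuming the kernel's u8/u32 types and ether_crc() from <linux/crc32.h>; it mirrors the driver's logic but is not the function itself.

#include <linux/crc32.h>	/* ether_crc() */
#include <linux/if_ether.h>	/* ETH_ALEN */

/* Sketch only: width is priv->hash_width as configured above,
 * 9 with extended hash and 8 without.
 */
static void gfar_hash_bin_sketch(const u8 *addr, int width,
				 u8 *reg_idx, u8 *bit_idx)
{
	u32 crc = ether_crc(ETH_ALEN, addr);

	*bit_idx = (crc >> (32 - width)) & 0x1f; /* low 5 hash bits: bit within the reg */
	*reg_idx = crc >> (32 - width + 5);	 /* remaining bits: priv->hash_regs[] index */
}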
| 1229 | /* Set up the ethernet device structure, private data, |
| 1230 | * and anything else we need before we start |
| 1231 | */ |
| 1232 | static int gfar_probe(struct platform_device *ofdev) |
| 1233 | { |
| 1234 | struct net_device *dev = NULL; |
| 1235 | struct gfar_private *priv = NULL; |
| 1236 | int err = 0, i; |
| 1237 | |
| 1238 | err = gfar_of_init(ofdev, &dev); |
| 1239 | |
| 1240 | if (err) |
| 1241 | return err; |
| 1242 | |
| 1243 | priv = netdev_priv(dev); |
| 1244 | priv->ndev = dev; |
| 1245 | priv->ofdev = ofdev; |
| 1246 | priv->dev = &ofdev->dev; |
| 1247 | SET_NETDEV_DEV(dev, &ofdev->dev); |
| 1248 | |
| 1249 | spin_lock_init(&priv->bflock); |
| 1250 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
| 1251 | |
| 1252 | platform_set_drvdata(ofdev, priv); |
| 1253 | |
| 1254 | gfar_detect_errata(priv); |
| 1255 | |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1256 | /* Set the dev->base_addr to the gfar reg region */ |
| 1257 | dev->base_addr = (unsigned long) priv->gfargrp[0].regs; |
| 1258 | |
| 1259 | /* Fill in the dev structure */ |
| 1260 | dev->watchdog_timeo = TX_TIMEOUT; |
| 1261 | dev->mtu = 1500; |
| 1262 | dev->netdev_ops = &gfar_netdev_ops; |
| 1263 | dev->ethtool_ops = &gfar_ethtool_ops; |
| 1264 | |
| 1265 | /* Register for NAPI ... we are registering NAPI for each group */
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 1266 | if (priv->mode == SQ_SG_MODE) { |
| 1267 | netif_napi_add(dev, &priv->gfargrp[0].napi_rx, gfar_poll_rx_sq, |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1268 | GFAR_DEV_WEIGHT); |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 1269 | netif_napi_add(dev, &priv->gfargrp[0].napi_tx, gfar_poll_tx_sq, |
| 1270 | 2); |
| 1271 | } else { |
| 1272 | for (i = 0; i < priv->num_grps; i++) { |
| 1273 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, |
| 1274 | gfar_poll_rx, GFAR_DEV_WEIGHT); |
| 1275 | netif_napi_add(dev, &priv->gfargrp[i].napi_tx, |
| 1276 | gfar_poll_tx, 2); |
| 1277 | } |
| 1278 | } |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1279 | |
| 1280 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
| 1281 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
| 1282 | NETIF_F_RXCSUM; |
| 1283 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | |
| 1284 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; |
| 1285 | } |
| 1286 | |
| 1287 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { |
| 1288 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | |
| 1289 | NETIF_F_HW_VLAN_CTAG_RX; |
| 1290 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
| 1291 | } |
| 1292 | |
| 1293 | gfar_init_addr_hash_table(priv); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1294 | |
Claudiu Manoil | 532c37b | 2014-02-17 12:53:16 +0200 | [diff] [blame] | 1295 | /* Insert receive time stamps into padding alignment bytes */ |
| 1296 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
| 1297 | priv->padding = 8; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1298 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 1299 | if (dev->features & NETIF_F_IP_CSUM || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1300 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
Wu Jiajun-B06378 | bee9e58 | 2012-05-21 23:00:48 +0000 | [diff] [blame] | 1301 | dev->needed_headroom = GMAC_FCB_LEN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | |
| 1303 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1304 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1305 | /* Initializing some of the rx/tx queue level parameters */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1306 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 1307 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; |
| 1308 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; |
| 1309 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; |
| 1310 | priv->tx_queue[i]->txic = DEFAULT_TXIC; |
| 1311 | } |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1312 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1313 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1314 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; |
| 1315 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; |
| 1316 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; |
| 1317 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1319 | /* always enable rx filer */ |
Sebastian Poehn | 4aa3a71 | 2011-06-20 13:57:59 -0700 | [diff] [blame] | 1320 | priv->rx_filer_enable = 1; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1321 | /* Enable most messages by default */ |
| 1322 | priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
Claudiu Manoil | b98b8ba | 2012-09-23 22:39:08 +0000 | [diff] [blame] | 1323 | /* use priority h/w tx queue scheduling for single queue devices */
| 1324 | if (priv->num_tx_queues == 1) |
| 1325 | priv->prio_sched_en = 1; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1326 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1327 | set_bit(GFAR_DOWN, &priv->state); |
| 1328 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1329 | gfar_hw_init(priv); |
Trent Piepho | d3eab82 | 2008-10-02 11:12:24 +0000 | [diff] [blame] | 1330 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | err = register_netdev(dev); |
| 1332 | |
| 1333 | if (err) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1334 | pr_err("%s: Cannot register net device, aborting\n", dev->name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | goto register_fail; |
| 1336 | } |
| 1337 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1338 | /* Carrier starts down, phylib will bring it up */ |
| 1339 | netif_carrier_off(dev); |
| 1340 | |
Anton Vorontsov | 2884e5c | 2009-02-01 00:52:34 -0800 | [diff] [blame] | 1341 | device_init_wakeup(&dev->dev, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1342 | priv->device_flags & |
| 1343 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
Anton Vorontsov | 2884e5c | 2009-02-01 00:52:34 -0800 | [diff] [blame] | 1344 | |
Dai Haruki | c50a5d9 | 2008-12-17 16:51:32 -0800 | [diff] [blame] | 1345 | /* fill out IRQ number and name fields */ |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1346 | for (i = 0; i < priv->num_grps; i++) { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1347 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1348 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1349 | sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", |
Joe Perches | 0015e55 | 2012-03-25 07:10:07 +0000 | [diff] [blame] | 1350 | dev->name, "_g", '0' + i, "_tx"); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1351 | sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", |
Joe Perches | 0015e55 | 2012-03-25 07:10:07 +0000 | [diff] [blame] | 1352 | dev->name, "_g", '0' + i, "_rx"); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1353 | sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", |
Joe Perches | 0015e55 | 2012-03-25 07:10:07 +0000 | [diff] [blame] | 1354 | dev->name, "_g", '0' + i, "_er"); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1355 | } else |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1356 | strcpy(gfar_irq(grp, TX)->name, dev->name); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1357 | } |
Dai Haruki | c50a5d9 | 2008-12-17 16:51:32 -0800 | [diff] [blame] | 1358 | |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1359 | /* Initialize the filer table */ |
| 1360 | gfar_init_filer_table(priv); |
| 1361 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | /* Print out the device info */ |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1363 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1365 | /* Even more device info helps when determining which kernel |
| 1366 | * provided which set of benchmarks. |
| 1367 | */ |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1368 | netdev_info(dev, "Running with NAPI enabled\n"); |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1369 | for (i = 0; i < priv->num_rx_queues; i++) |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1370 | netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", |
| 1371 | i, priv->rx_queue[i]->rx_ring_size); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1372 | for (i = 0; i < priv->num_tx_queues; i++) |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1373 | netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", |
| 1374 | i, priv->tx_queue[i]->tx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | |
| 1376 | return 0; |
| 1377 | |
| 1378 | register_fail: |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1379 | unmap_group_regs(priv); |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1380 | gfar_free_rx_queues(priv); |
| 1381 | gfar_free_tx_queues(priv); |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1382 | if (priv->phy_node) |
| 1383 | of_node_put(priv->phy_node); |
| 1384 | if (priv->tbi_node) |
| 1385 | of_node_put(priv->tbi_node); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1386 | free_gfar_dev(priv); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1387 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | } |
| 1389 | |
Grant Likely | 2dc1158 | 2010-08-06 09:25:50 -0600 | [diff] [blame] | 1390 | static int gfar_remove(struct platform_device *ofdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | { |
Jingoo Han | 8513fbd | 2013-05-23 00:52:31 +0000 | [diff] [blame] | 1392 | struct gfar_private *priv = platform_get_drvdata(ofdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1394 | if (priv->phy_node) |
| 1395 | of_node_put(priv->phy_node); |
| 1396 | if (priv->tbi_node) |
| 1397 | of_node_put(priv->tbi_node); |
| 1398 | |
David S. Miller | d9d8e04 | 2009-09-06 01:41:02 -0700 | [diff] [blame] | 1399 | unregister_netdev(priv->ndev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1400 | unmap_group_regs(priv); |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1401 | gfar_free_rx_queues(priv); |
| 1402 | gfar_free_tx_queues(priv); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1403 | free_gfar_dev(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | |
| 1405 | return 0; |
| 1406 | } |
| 1407 | |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1408 | #ifdef CONFIG_PM |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1409 | |
| 1410 | static int gfar_suspend(struct device *dev) |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1411 | { |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1412 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 1413 | struct net_device *ndev = priv->ndev; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1414 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1415 | unsigned long flags; |
| 1416 | u32 tempval; |
| 1417 | |
| 1418 | int magic_packet = priv->wol_en && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1419 | (priv->device_flags & |
| 1420 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1421 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1422 | netif_device_detach(ndev); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1423 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1424 | if (netif_running(ndev)) { |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1425 | |
| 1426 | local_irq_save(flags); |
| 1427 | lock_tx_qs(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1428 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1429 | gfar_halt_nodisable(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1430 | |
| 1431 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1432 | tempval = gfar_read(®s->maccfg1); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1433 | |
| 1434 | tempval &= ~MACCFG1_TX_EN; |
| 1435 | |
| 1436 | if (!magic_packet) |
| 1437 | tempval &= ~MACCFG1_RX_EN; |
| 1438 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1439 | gfar_write(®s->maccfg1, tempval); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1440 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1441 | unlock_tx_qs(priv); |
| 1442 | local_irq_restore(flags); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1443 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1444 | disable_napi(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1445 | |
| 1446 | if (magic_packet) { |
| 1447 | /* Enable interrupt on Magic Packet */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1448 | gfar_write(®s->imask, IMASK_MAG); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1449 | |
| 1450 | /* Enable Magic Packet mode */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1451 | tempval = gfar_read(®s->maccfg2); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1452 | tempval |= MACCFG2_MPEN; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1453 | gfar_write(®s->maccfg2, tempval); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1454 | } else { |
| 1455 | phy_stop(priv->phydev); |
| 1456 | } |
| 1457 | } |
| 1458 | |
| 1459 | return 0; |
| 1460 | } |
| 1461 | |
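The magic-packet branch above is only taken when wake-on-LAN was enabled beforehand (priv->wol_en, set through the driver's ethtool hooks). A user-space sketch that requests WAKE_MAGIC via the standard ethtool ioctl; "eth0" is an assumption.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_MAGIC };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */
	ifr.ifr_data = (char *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* lands in the driver's set_wol hook */
		return 1;

	close(fd);
	return 0;
}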
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1462 | static int gfar_resume(struct device *dev) |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1463 | { |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1464 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 1465 | struct net_device *ndev = priv->ndev; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1466 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1467 | unsigned long flags; |
| 1468 | u32 tempval; |
| 1469 | int magic_packet = priv->wol_en && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1470 | (priv->device_flags & |
| 1471 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1472 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1473 | if (!netif_running(ndev)) { |
| 1474 | netif_device_attach(ndev); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1475 | return 0; |
| 1476 | } |
| 1477 | |
| 1478 | if (!magic_packet && priv->phydev) |
| 1479 | phy_start(priv->phydev); |
| 1480 | |
| 1481 | /* Disable Magic Packet mode, in case something |
| 1482 | * else woke us up. |
| 1483 | */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1484 | local_irq_save(flags); |
| 1485 | lock_tx_qs(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1486 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1487 | tempval = gfar_read(®s->maccfg2); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1488 | tempval &= ~MACCFG2_MPEN; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1489 | gfar_write(®s->maccfg2, tempval); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1490 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1491 | gfar_start(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1492 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1493 | unlock_tx_qs(priv); |
| 1494 | local_irq_restore(flags); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1495 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1496 | netif_device_attach(ndev); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1497 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1498 | enable_napi(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1499 | |
| 1500 | return 0; |
| 1501 | } |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1502 | |
| 1503 | static int gfar_restore(struct device *dev) |
| 1504 | { |
| 1505 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 1506 | struct net_device *ndev = priv->ndev; |
| 1507 | |
Wang Dongsheng | 103cdd1 | 2012-11-09 04:43:51 +0000 | [diff] [blame] | 1508 | if (!netif_running(ndev)) { |
| 1509 | netif_device_attach(ndev); |
| 1510 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1511 | return 0; |
Wang Dongsheng | 103cdd1 | 2012-11-09 04:43:51 +0000 | [diff] [blame] | 1512 | } |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1513 | |
Claudiu Manoil | 1eb8f7a | 2012-11-08 22:11:41 +0000 | [diff] [blame] | 1514 | if (gfar_init_bds(ndev)) { |
| 1515 | free_skb_resources(priv); |
| 1516 | return -ENOMEM; |
| 1517 | } |
| 1518 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1519 | gfar_mac_reset(priv); |
| 1520 | |
| 1521 | gfar_init_tx_rx_base(priv); |
| 1522 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1523 | gfar_start(priv); |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1524 | |
| 1525 | priv->oldlink = 0; |
| 1526 | priv->oldspeed = 0; |
| 1527 | priv->oldduplex = -1; |
| 1528 | |
| 1529 | if (priv->phydev) |
| 1530 | phy_start(priv->phydev); |
| 1531 | |
| 1532 | netif_device_attach(ndev); |
Anton Vorontsov | 5ea681d | 2009-11-10 14:11:05 +0000 | [diff] [blame] | 1533 | enable_napi(priv); |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1534 | |
| 1535 | return 0; |
| 1536 | } |
| 1537 | |
| 1538 | static struct dev_pm_ops gfar_pm_ops = { |
| 1539 | .suspend = gfar_suspend, |
| 1540 | .resume = gfar_resume, |
| 1541 | .freeze = gfar_suspend, |
| 1542 | .thaw = gfar_resume, |
| 1543 | .restore = gfar_restore, |
| 1544 | }; |
| 1545 | |
| 1546 | #define GFAR_PM_OPS (&gfar_pm_ops) |
| 1547 | |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1548 | #else |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1549 | |
| 1550 | #define GFAR_PM_OPS NULL |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1551 | |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1552 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | |
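For context, GFAR_PM_OPS is consumed by the platform_driver definition later in this file. A hedged sketch of the usual wiring follows; the field values here are illustrative, not copied from the actual definition.

static struct platform_driver gfar_driver_sketch = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,	/* NULL when CONFIG_PM is off */
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};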
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1554 | /* Reads the controller's registers to determine what interface |
| 1555 | * connects it to the PHY. |
| 1556 | */ |
| 1557 | static phy_interface_t gfar_get_interface(struct net_device *dev) |
| 1558 | { |
| 1559 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1560 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1561 | u32 ecntrl; |
| 1562 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1563 | ecntrl = gfar_read(®s->ecntrl); |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1564 | |
| 1565 | if (ecntrl & ECNTRL_SGMII_MODE) |
| 1566 | return PHY_INTERFACE_MODE_SGMII; |
| 1567 | |
| 1568 | if (ecntrl & ECNTRL_TBI_MODE) { |
| 1569 | if (ecntrl & ECNTRL_REDUCED_MODE) |
| 1570 | return PHY_INTERFACE_MODE_RTBI; |
| 1571 | else |
| 1572 | return PHY_INTERFACE_MODE_TBI; |
| 1573 | } |
| 1574 | |
| 1575 | if (ecntrl & ECNTRL_REDUCED_MODE) { |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1576 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1577 | return PHY_INTERFACE_MODE_RMII;
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1578 | } else {
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1580 | phy_interface_t interface = priv->interface; |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1581 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1582 | /* This isn't autodetected right now, so it must |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1583 | * be set by the device tree or platform code. |
| 1584 | */ |
| 1585 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) |
| 1586 | return PHY_INTERFACE_MODE_RGMII_ID; |
| 1587 | |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1588 | return PHY_INTERFACE_MODE_RGMII; |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1589 | } |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1590 | } |
| 1591 | |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1592 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1593 | return PHY_INTERFACE_MODE_GMII; |
| 1594 | |
| 1595 | return PHY_INTERFACE_MODE_MII; |
| 1596 | } |
| 1597 | |
| 1598 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1599 | /* Initializes driver's PHY state, and attaches to the PHY. |
| 1600 | * Returns 0 on success. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | */ |
| 1602 | static int init_phy(struct net_device *dev) |
| 1603 | { |
| 1604 | struct gfar_private *priv = netdev_priv(dev); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1605 | uint gigabit_support = |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1606 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 1607 | GFAR_SUPPORTED_GBIT : 0; |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1608 | phy_interface_t interface; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | |
| 1610 | priv->oldlink = 0; |
| 1611 | priv->oldspeed = 0; |
| 1612 | priv->oldduplex = -1; |
| 1613 | |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1614 | interface = gfar_get_interface(dev); |
| 1615 | |
Anton Vorontsov | 1db780f | 2009-07-16 21:31:42 +0000 | [diff] [blame] | 1616 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
| 1617 | interface); |
| 1618 | if (!priv->phydev) |
| 1619 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, |
| 1620 | interface); |
| 1621 | if (!priv->phydev) { |
| 1622 | dev_err(&dev->dev, "could not attach to PHY\n"); |
| 1623 | return -ENODEV; |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1624 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1625 | |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1626 | if (interface == PHY_INTERFACE_MODE_SGMII) |
| 1627 | gfar_configure_serdes(dev); |
| 1628 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1629 | /* Remove any features not supported by the controller */ |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1630 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
| 1631 | priv->phydev->advertising = priv->phydev->supported; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | |
| 1633 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1634 | } |
| 1635 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1636 | /* Initialize TBI PHY interface for communicating with the |
Paul Gortmaker | d031358 | 2008-04-17 00:08:10 -0400 | [diff] [blame] | 1637 | * SERDES lynx PHY on the chip. We communicate with this PHY |
| 1638 | * through the MDIO bus on each controller, treating it as a |
| 1639 | * "normal" PHY at the address found in the TBIPA register. We assume |
| 1640 | * that the TBIPA register is valid. Either the MDIO bus code will set |
| 1641 | * it to a value that doesn't conflict with other PHYs on the bus, or the |
| 1642 | * value doesn't matter, as there are no other PHYs on the bus. |
| 1643 | */ |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1644 | static void gfar_configure_serdes(struct net_device *dev) |
| 1645 | { |
| 1646 | struct gfar_private *priv = netdev_priv(dev); |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1647 | struct phy_device *tbiphy; |
Trent Piepho | c132419 | 2008-10-30 18:17:06 -0700 | [diff] [blame] | 1648 | |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1649 | if (!priv->tbi_node) { |
| 1650 | dev_warn(&dev->dev, "error: SGMII mode requires that the " |
| 1651 | "device tree specify a tbi-handle\n"); |
| 1652 | return; |
| 1653 | } |
| 1654 | |
| 1655 | tbiphy = of_phy_find_device(priv->tbi_node); |
| 1656 | if (!tbiphy) { |
| 1657 | dev_err(&dev->dev, "error: Could not get TBI device\n"); |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1658 | return; |
| 1659 | } |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1660 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1661 | /* If the link is already up, we must already be ok, and don't need to |
Trent Piepho | bdb59f9 | 2008-10-30 18:17:07 -0700 | [diff] [blame] | 1662 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
| 1663 | * everything for us? Resetting it takes the link down and requires |
| 1664 | * several seconds for it to come back. |
| 1665 | */ |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1666 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1667 | return; |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1668 | |
Paul Gortmaker | d031358 | 2008-04-17 00:08:10 -0400 | [diff] [blame] | 1669 | /* Single clk mode, mii mode off (for serdes communication) */
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1670 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1671 | |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1672 | phy_write(tbiphy, MII_ADVERTISE, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1673 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
| 1674 | ADVERTISE_1000XPSE_ASYM); |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1675 | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1676 | phy_write(tbiphy, MII_BMCR, |
| 1677 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | |
| 1678 | BMCR_SPEED1000); |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1679 | } |
| 1680 | |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1681 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
| 1682 | { |
| 1683 | u32 res; |
| 1684 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1685 | /* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1686 | * actually wait for IEVENT_GRSC flag. |
| 1687 | */ |
Claudiu Manoil | ad3660c | 2013-10-09 20:20:40 +0300 | [diff] [blame] | 1688 | if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1689 | return 0; |
| 1690 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1691 | /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1692 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
| 1693 | * and the Rx can be safely reset. |
| 1694 | */ |
| 1695 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); |
| 1696 | res &= 0x7f807f80; |
| 1697 | if ((res & 0xffff) == (res >> 16)) |
| 1698 | return 1; |
| 1699 | |
| 1700 | return 0; |
| 1701 | } |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1702 | |
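The masked comparison above can be hard to parse at a glance: the 0x7f807f80 mask keeps the two bit fields, and the two halves of the result are then compared. A stand-alone demo on a hypothetical register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t res = 0x12005612;	/* hypothetical read of offset 0xd1c */

	res &= 0x7f807f80;		/* keep only the two mirrored bit fields */
	printf("rx %s idle\n",
	       ((res & 0xffff) == (res >> 16)) ? "is" : "is not");
	return 0;
}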
| 1703 | /* Halt the receive and transmit queues */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1704 | static void gfar_halt_nodisable(struct gfar_private *priv) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | { |
Claudiu Manoil | efeddce | 2014-02-17 12:53:17 +0200 | [diff] [blame] | 1706 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1707 | u32 tempval; |
| 1708 | |
Claudiu Manoil | efeddce | 2014-02-17 12:53:17 +0200 | [diff] [blame] | 1709 | gfar_ints_disable(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1710 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | /* Stop the DMA, and wait for it to stop */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1712 | tempval = gfar_read(®s->dmactrl); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1713 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != |
| 1714 | (DMACTRL_GRS | DMACTRL_GTS)) { |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1715 | int ret; |
| 1716 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1718 | gfar_write(®s->dmactrl, tempval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1720 | do { |
| 1721 | ret = spin_event_timeout(((gfar_read(®s->ievent) & |
| 1722 | (IEVENT_GRSC | IEVENT_GTSC)) == |
| 1723 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); |
| 1724 | if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) |
| 1725 | ret = __gfar_is_rx_idle(priv); |
| 1726 | } while (!ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | } |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1728 | } |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1729 | |
| 1730 | /* Halt the receive and transmit queues */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1731 | void gfar_halt(struct gfar_private *priv) |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1732 | { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1733 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1734 | u32 tempval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1736 | /* Disable the Rx/Tx hw queues */
| 1737 | gfar_write(®s->rqueue, 0); |
| 1738 | gfar_write(®s->tqueue, 0); |
Scott Wood | 2a54adc | 2008-08-12 15:10:46 -0500 | [diff] [blame] | 1739 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1740 | mdelay(10); |
| 1741 | |
| 1742 | gfar_halt_nodisable(priv); |
| 1743 | |
| 1744 | /* Disable Rx/Tx DMA */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | tempval = gfar_read(®s->maccfg1); |
| 1746 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); |
| 1747 | gfar_write(®s->maccfg1, tempval); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1748 | } |
| 1749 | |
| 1750 | void stop_gfar(struct net_device *dev) |
| 1751 | { |
| 1752 | struct gfar_private *priv = netdev_priv(dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1753 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1754 | netif_tx_stop_all_queues(dev); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1755 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1756 | smp_mb__before_clear_bit(); |
| 1757 | set_bit(GFAR_DOWN, &priv->state); |
| 1758 | smp_mb__after_clear_bit(); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1759 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1760 | disable_napi(priv); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1761 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1762 | /* disable ints and gracefully shut down Rx/Tx DMA */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1763 | gfar_halt(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1765 | phy_stop(priv->phydev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1767 | free_skb_resources(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | } |
| 1769 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1770 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1771 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | struct txbd8 *txbdp; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1773 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1774 | int i, j; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1775 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1776 | txbdp = tx_queue->tx_bd_base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1778 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
| 1779 | if (!tx_queue->tx_skbuff[i]) |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1780 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1782 | dma_unmap_single(priv->dev, txbdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1783 | txbdp->length, DMA_TO_DEVICE); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1784 | txbdp->lstatus = 0; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1785 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1786 | j++) { |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1787 | txbdp++; |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1788 | dma_unmap_page(priv->dev, txbdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1789 | txbdp->length, DMA_TO_DEVICE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1790 | } |
Andy Fleming | ad5da7a | 2008-05-07 13:20:55 -0500 | [diff] [blame] | 1791 | txbdp++; |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1792 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
| 1793 | tx_queue->tx_skbuff[i] = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1794 | } |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1795 | kfree(tx_queue->tx_skbuff); |
Claudiu Manoil | 1eb8f7a | 2012-11-08 22:11:41 +0000 | [diff] [blame] | 1796 | tx_queue->tx_skbuff = NULL; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1797 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1799 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
| 1800 | { |
| 1801 | struct rxbd8 *rxbdp; |
| 1802 | struct gfar_private *priv = netdev_priv(rx_queue->dev); |
| 1803 | int i; |
| 1804 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1805 | rxbdp = rx_queue->rx_bd_base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1806 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1807 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
| 1808 | if (rx_queue->rx_skbuff[i]) { |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1809 | dma_unmap_single(priv->dev, rxbdp->bufPtr, |
| 1810 | priv->rx_buffer_size, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1811 | DMA_FROM_DEVICE); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1812 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
| 1813 | rx_queue->rx_skbuff[i] = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1814 | } |
Anton Vorontsov | e69edd2 | 2009-10-12 06:00:30 +0000 | [diff] [blame] | 1815 | rxbdp->lstatus = 0; |
| 1816 | rxbdp->bufPtr = 0; |
| 1817 | rxbdp++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1818 | } |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1819 | kfree(rx_queue->rx_skbuff); |
Claudiu Manoil | 1eb8f7a | 2012-11-08 22:11:41 +0000 | [diff] [blame] | 1820 | rx_queue->rx_skbuff = NULL; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1821 | } |
Anton Vorontsov | e69edd2 | 2009-10-12 06:00:30 +0000 | [diff] [blame] | 1822 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1823 | /* If there are any tx skbs or rx skbs still around, free them. |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1824 | * Then free tx_skbuff and rx_skbuff |
| 1825 | */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1826 | static void free_skb_resources(struct gfar_private *priv) |
| 1827 | { |
| 1828 | struct gfar_priv_tx_q *tx_queue = NULL; |
| 1829 | struct gfar_priv_rx_q *rx_queue = NULL; |
| 1830 | int i; |
| 1831 | |
| 1832 | /* Go through all the buffer descriptors and free their data buffers */ |
| 1833 | for (i = 0; i < priv->num_tx_queues; i++) { |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 1834 | struct netdev_queue *txq; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1835 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1836 | tx_queue = priv->tx_queue[i]; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 1837 | txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1838 | if (tx_queue->tx_skbuff) |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1839 | free_skb_tx_queue(tx_queue); |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 1840 | netdev_tx_reset_queue(txq); |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1841 | } |
| 1842 | |
| 1843 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1844 | rx_queue = priv->rx_queue[i]; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1845 | if (rx_queue->rx_skbuff) |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1846 | free_skb_rx_queue(rx_queue); |
| 1847 | } |
| 1848 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1849 | dma_free_coherent(priv->dev, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1850 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
| 1851 | sizeof(struct rxbd8) * priv->total_rx_ring_size, |
| 1852 | priv->tx_queue[0]->tx_bd_base, |
| 1853 | priv->tx_queue[0]->tx_bd_dma_base); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1854 | } |
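For reference, the single dma_free_coherent() call above can tear everything down because all Tx and Rx descriptor rings live in one coherent allocation anchored at tx_queue[0]. A minimal sketch of the matching allocation, inferred from the sizes freed here (an assumption, not the driver's exact code):

/* One coherent block: every Tx ring, then every Rx ring. The CPU and
 * DMA addresses are remembered in tx_queue[0] so free_skb_resources()
 * can hand the whole block back in one call.
 */
void *vaddr;
dma_addr_t dma_addr;

vaddr = dma_alloc_coherent(priv->dev,
			   sizeof(struct txbd8) * priv->total_tx_ring_size +
			   sizeof(struct rxbd8) * priv->total_rx_ring_size,
			   &dma_addr, GFP_KERNEL);
if (!vaddr)
	return -ENOMEM;
priv->tx_queue[0]->tx_bd_base = vaddr;
priv->tx_queue[0]->tx_bd_dma_base = dma_addr;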
| 1855 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1856 | void gfar_start(struct gfar_private *priv) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1857 | { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1858 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1859 | u32 tempval; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1860 | int i = 0; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1861 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1862 | /* Enable Rx/Tx hw queues */ |
| 1863 | gfar_write(®s->rqueue, priv->rqueue); |
| 1864 | gfar_write(®s->tqueue, priv->tqueue); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1865 | |
| 1866 | /* Initialize DMACTRL to have WWR and WOP */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1867 | tempval = gfar_read(®s->dmactrl); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1868 | tempval |= DMACTRL_INIT_SETTINGS; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1869 | gfar_write(®s->dmactrl, tempval); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1870 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1871 | /* Make sure we aren't stopped */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1872 | tempval = gfar_read(®s->dmactrl); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1873 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1874 | gfar_write(®s->dmactrl, tempval); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1875 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1876 | for (i = 0; i < priv->num_grps; i++) { |
| 1877 | regs = priv->gfargrp[i].regs; |
| 1878 | /* Clear THLT/RHLT, so that the DMA starts polling now */ |
| 1879 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); |
| 1880 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1881 | } |
Dai Haruki | 12dea57 | 2008-12-16 15:30:20 -0800 | [diff] [blame] | 1882 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1883 | /* Enable Rx/Tx DMA */ |
| 1884 | tempval = gfar_read(®s->maccfg1); |
| 1885 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); |
| 1886 | gfar_write(®s->maccfg1, tempval); |
| 1887 | |
Claudiu Manoil | efeddce | 2014-02-17 12:53:17 +0200 | [diff] [blame] | 1888 | gfar_ints_enable(priv); |
| 1889 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1890 | priv->ndev->trans_start = jiffies; /* prevent tx timeout */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1891 | } |
| 1892 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 1893 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
| 1894 | { |
| 1895 | free_irq(gfar_irq(grp, TX)->irq, grp); |
| 1896 | free_irq(gfar_irq(grp, RX)->irq, grp); |
| 1897 | free_irq(gfar_irq(grp, ER)->irq, grp); |
| 1898 | } |
| 1899 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1900 | static int register_grp_irqs(struct gfar_priv_grp *grp) |
| 1901 | { |
| 1902 | struct gfar_private *priv = grp->priv; |
| 1903 | struct net_device *dev = priv->ndev; |
Anton Vorontsov | ccc05c6 | 2009-10-12 06:00:26 +0000 | [diff] [blame] | 1904 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1905 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 | /* If the device has multiple interrupts, register for |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1907 | * them. Otherwise, register only for the one shared line
| 1908 | */ |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1909 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1910 | /* Install our interrupt handlers for Error, |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1911 | * Transmit, and Receive |
| 1912 | */ |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1913 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
| 1914 | gfar_irq(grp, ER)->name, grp); |
| 1915 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1916 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1917 | gfar_irq(grp, ER)->irq); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1918 | |
Julia Lawall | 2145f1a | 2010-08-05 10:26:20 +0000 | [diff] [blame] | 1919 | goto err_irq_fail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1920 | } |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1921 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
| 1922 | gfar_irq(grp, TX)->name, grp); |
| 1923 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1924 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1925 | gfar_irq(grp, TX)->irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1926 | goto tx_irq_fail; |
| 1927 | } |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1928 | err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
| 1929 | gfar_irq(grp, RX)->name, grp); |
| 1930 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1931 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1932 | gfar_irq(grp, RX)->irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | goto rx_irq_fail; |
| 1934 | } |
| 1935 | } else { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1936 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
| 1937 | gfar_irq(grp, TX)->name, grp); |
| 1938 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1939 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1940 | gfar_irq(grp, TX)->irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1941 | goto err_irq_fail; |
| 1942 | } |
| 1943 | } |
| 1944 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1945 | return 0; |
| 1946 | |
| 1947 | rx_irq_fail: |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1948 | free_irq(gfar_irq(grp, TX)->irq, grp); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1949 | tx_irq_fail: |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1950 | free_irq(gfar_irq(grp, ER)->irq, grp); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1951 | err_irq_fail: |
| 1952 | return err; |
| 1954 | }
| 1955 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 1956 | static void gfar_free_irq(struct gfar_private *priv) |
| 1957 | { |
| 1958 | int i; |
| 1959 | |
| 1960 | /* Free the IRQs */ |
| 1961 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
| 1962 | for (i = 0; i < priv->num_grps; i++) |
| 1963 | free_grp_irqs(&priv->gfargrp[i]); |
| 1964 | } else { |
| 1965 | for (i = 0; i < priv->num_grps; i++) |
| 1966 | free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, |
| 1967 | &priv->gfargrp[i]); |
| 1968 | } |
| 1969 | } |
| 1970 | |
| 1971 | static int gfar_request_irq(struct gfar_private *priv) |
| 1972 | { |
| 1973 | int err, i, j; |
| 1974 | |
| 1975 | for (i = 0; i < priv->num_grps; i++) { |
| 1976 | err = register_grp_irqs(&priv->gfargrp[i]); |
| 1977 | if (err) { |
| 1978 | for (j = 0; j < i; j++) |
| 1979 | free_grp_irqs(&priv->gfargrp[j]); |
| 1980 | return err; |
| 1981 | } |
| 1982 | } |
| 1983 | |
| 1984 | return 0; |
| 1985 | } |
| 1986 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1987 | /* Bring the controller up and running */ |
| 1988 | int startup_gfar(struct net_device *ndev) |
| 1989 | { |
| 1990 | struct gfar_private *priv = netdev_priv(ndev); |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 1991 | int err; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1992 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1993 | gfar_mac_reset(priv); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1994 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1995 | err = gfar_alloc_skb_resources(ndev); |
| 1996 | if (err) |
| 1997 | return err; |
| 1998 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1999 | gfar_init_tx_rx_base(priv); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2000 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2001 | smp_mb__before_clear_bit(); |
| 2002 | clear_bit(GFAR_DOWN, &priv->state); |
| 2003 | smp_mb__after_clear_bit(); |
| 2004 | |
| 2005 | /* Start Rx/Tx DMA and enable the interrupts */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 2006 | gfar_start(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2007 | |
Anton Vorontsov | 826aa4a | 2009-10-12 06:00:34 +0000 | [diff] [blame] | 2008 | phy_start(priv->phydev); |
| 2009 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2010 | enable_napi(priv); |
| 2011 | |
| 2012 | netif_tx_wake_all_queues(ndev); |
| 2013 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2014 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2015 | } |
| 2016 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2017 | /* Called when something needs to use the ethernet device |
| 2018 | * Returns 0 for success. |
| 2019 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2020 | static int gfar_enet_open(struct net_device *dev) |
| 2021 | { |
Li Yang | 94e8cc3 | 2007-10-12 21:53:51 +0800 | [diff] [blame] | 2022 | struct gfar_private *priv = netdev_priv(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | int err; |
| 2024 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | err = init_phy(dev); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2026 | if (err) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2027 | return err; |
| 2028 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 2029 | err = gfar_request_irq(priv); |
| 2030 | if (err) |
| 2031 | return err; |
| 2032 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2033 | err = startup_gfar(dev); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2034 | if (err) |
Anton Vorontsov | db0e8e3 | 2007-10-17 23:57:46 +0400 | [diff] [blame] | 2035 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2036 | |
Anton Vorontsov | 2884e5c | 2009-02-01 00:52:34 -0800 | [diff] [blame] | 2037 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
| 2038 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | return err; |
| 2040 | } |
| 2041 | |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2042 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2043 | { |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2044 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
Kumar Gala | 6c31d55 | 2009-04-28 08:04:10 -0700 | [diff] [blame] | 2045 | |
| 2046 | memset(fcb, 0, GMAC_FCB_LEN); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2047 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2048 | return fcb; |
| 2049 | } |
| 2050 | |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2051 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2052 | int fcb_length) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2053 | { |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2054 | /* If we're here, it's an IP packet with a TCP or UDP
| 2055 | * payload. We set it up for checksumming, using a pseudo-header
| 2056 | * we provide
| 2057 | */ |
Jan Ceuleers | 3a2e16c | 2012-06-05 03:42:14 +0000 | [diff] [blame] | 2058 | u8 flags = TXFCB_DEFAULT; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2059 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2060 | /* Tell the controller what the protocol is,
| 2061 | * and provide the already calculated phcs
| 2062 | */ |
Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 2063 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2064 | flags |= TXFCB_UDP; |
Arnaldo Carvalho de Melo | 4bedb45 | 2007-03-13 14:28:48 -0300 | [diff] [blame] | 2065 | fcb->phcs = udp_hdr(skb)->check; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2066 | } else |
Kumar Gala | 8da32de | 2007-06-29 00:12:04 -0500 | [diff] [blame] | 2067 | fcb->phcs = tcp_hdr(skb)->check; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2068 | |
| 2069 | /* l3os is the distance between the start of the |
| 2070 | * frame (skb->data) and the start of the IP hdr. |
| 2071 | * l4os is the distance between the start of the |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2072 | * l3 hdr and the l4 hdr |
| 2073 | */ |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2074 | fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); |
Arnaldo Carvalho de Melo | cfe1fc7 | 2007-03-16 17:26:39 -0300 | [diff] [blame] | 2075 | fcb->l4os = skb_network_header_len(skb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2076 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2077 | fcb->flags = flags; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2078 | } |
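As a worked example of the l3os/l4os offsets above, assume an untagged IPv4/TCP frame with a 20-byte IP header, and that only the FCB (GMAC_FCB_LEN, assumed 8 bytes) has been pushed in front, so fcb_length == 8:

/* Hypothetical layout when gfar_tx_checksum() runs:
 *   skb->data -> [ FCB (8) | Ethernet (14) | IP (20) | TCP ... ]
 *   skb_network_offset(skb)              = 8 + 14 = 22
 *   fcb->l3os = 22 - fcb_length = 22 - 8 = 14  (FCB not counted)
 *   fcb->l4os = skb_network_header_len(skb)    = 20
 */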
| 2079 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2080 | static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2081 | { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2082 | fcb->flags |= TXFCB_VLN; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2083 | fcb->vlctl = vlan_tx_tag_get(skb); |
| 2084 | } |
| 2085 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2086 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2087 | struct txbd8 *base, int ring_size) |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2088 | { |
| 2089 | struct txbd8 *new_bd = bdp + stride; |
| 2090 | |
| 2091 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; |
| 2092 | } |
| 2093 | |
| 2094 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2095 | int ring_size) |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2096 | { |
| 2097 | return skip_txbd(bdp, 1, base, ring_size); |
| 2098 | } |
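To make the wrap arithmetic concrete, a small illustration with hypothetical values (an eight-entry ring):

struct txbd8 ring[8];
struct txbd8 *bdp = &ring[6];

bdp = skip_txbd(bdp, 3, ring, 8);  /* &ring[6] + 3 would be &ring[9];
                                    * past the end, so it wraps to &ring[1] */
bdp = next_txbd(bdp, ring, 8);     /* plain increment to &ring[2] */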
| 2099 | |
Claudiu Manoil | 02d88fb | 2013-08-05 17:20:09 +0300 | [diff] [blame] | 2100 | /* eTSEC12: csum generation not supported for some fcb offsets */ |
| 2101 | static inline bool gfar_csum_errata_12(struct gfar_private *priv, |
| 2102 | unsigned long fcb_addr) |
| 2103 | { |
| 2104 | return (gfar_has_errata(priv, GFAR_ERRATA_12) && |
| 2105 | (fcb_addr % 0x20) > 0x18); |
| 2106 | } |
| 2107 | |
| 2108 | /* eTSEC76: csum generation for frames larger than 2500 may |
| 2109 | * cause excess delays before start of transmission |
| 2110 | */ |
| 2111 | static inline bool gfar_csum_errata_76(struct gfar_private *priv, |
| 2112 | unsigned int len) |
| 2113 | { |
| 2114 | return (gfar_has_errata(priv, GFAR_ERRATA_76) && |
| 2115 | (len > 2500)); |
| 2116 | } |
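Concretely, the eTSEC12 test rejects FCB addresses whose offset within a 32-byte region falls in the last seven bytes (0x19 through 0x1f). A few hypothetical addresses, assuming GFAR_ERRATA_12 is set:

gfar_csum_errata_12(priv, 0x1000);  /* 0x1000 % 0x20 == 0x00 -> false */
gfar_csum_errata_12(priv, 0x1018);  /* 0x1018 % 0x20 == 0x18 -> false */
gfar_csum_errata_12(priv, 0x101c);  /* 0x101c % 0x20 == 0x1c -> true  */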
| 2117 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2118 | /* This is called by the kernel when a frame is ready for transmission. |
| 2119 | * It is pointed to by the netdev_ops->ndo_start_xmit function pointer
| 2120 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2121 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 2122 | { |
| 2123 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2124 | struct gfar_priv_tx_q *tx_queue = NULL; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2125 | struct netdev_queue *txq; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 2126 | struct gfar __iomem *regs = NULL; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2127 | struct txfcb *fcb = NULL; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2128 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
Dai Haruki | 5a5efed | 2008-12-16 15:34:50 -0800 | [diff] [blame] | 2129 | u32 lstatus; |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2130 | int i, rq = 0; |
| 2131 | int do_tstamp, do_csum, do_vlan; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2132 | u32 bufaddr; |
Andy Fleming | fef6108 | 2006-04-20 16:44:29 -0500 | [diff] [blame] | 2133 | unsigned long flags; |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2134 | unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2135 | |
| 2136 | rq = skb->queue_mapping; |
| 2137 | tx_queue = priv->tx_queue[rq]; |
| 2138 | txq = netdev_get_tx_queue(dev, rq); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2139 | base = tx_queue->tx_bd_base; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2140 | regs = tx_queue->grp->regs; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2141 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2142 | do_csum = (skb->ip_summed == CHECKSUM_PARTIAL);
| 2143 | do_vlan = vlan_tx_tag_present(skb); |
| 2144 | do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
| 2145 | priv->hwts_tx_en; |
| 2146 | |
| 2147 | if (do_csum || do_vlan) |
| 2148 | fcb_len = GMAC_FCB_LEN; |
| 2149 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2150 | /* check if time stamp should be generated */ |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2151 | if (unlikely(do_tstamp)) |
| 2152 | fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2153 | |
Li Yang | 5b28bea | 2009-03-27 15:54:30 -0700 | [diff] [blame] | 2154 | /* make space for additional header when fcb is needed */ |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2155 | if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2156 | struct sk_buff *skb_new; |
| 2157 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2158 | skb_new = skb_realloc_headroom(skb, fcb_len); |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2159 | if (!skb_new) { |
| 2160 | dev->stats.tx_errors++; |
David S. Miller | bd14ba8 | 2009-03-27 01:10:58 -0700 | [diff] [blame] | 2161 | kfree_skb(skb); |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2162 | return NETDEV_TX_OK; |
| 2163 | } |
Manfred Rudigier | db83d13 | 2012-01-09 23:26:50 +0000 | [diff] [blame] | 2164 | |
Eric Dumazet | 313b037 | 2012-07-05 11:45:13 +0000 | [diff] [blame] | 2165 | if (skb->sk) |
| 2166 | skb_set_owner_w(skb_new, skb->sk); |
| 2167 | consume_skb(skb); |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2168 | skb = skb_new; |
| 2169 | } |
| 2170 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2171 | /* total number of fragments in the SKB */ |
| 2172 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 2173 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2174 | /* calculate the required number of TxBDs for this skb */ |
| 2175 | if (unlikely(do_tstamp)) |
| 2176 | nr_txbds = nr_frags + 2; |
| 2177 | else |
| 2178 | nr_txbds = nr_frags + 1; |
| 2179 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2180 | /* check if there is space to queue this packet */ |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2181 | if (nr_txbds > tx_queue->num_txbdfree) { |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2182 | /* no space, stop the queue */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2183 | netif_tx_stop_queue(txq); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2184 | dev->stats.tx_fifo_errors++; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2185 | return NETDEV_TX_BUSY; |
| 2186 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2187 | |
| 2188 | /* Update transmit stats */ |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2189 | bytes_sent = skb->len; |
| 2190 | tx_queue->stats.tx_bytes += bytes_sent; |
| 2191 | /* keep Tx bytes on wire for BQL accounting */ |
| 2192 | GFAR_CB(skb)->bytes_sent = bytes_sent; |
Eric Dumazet | 1ac9ad1 | 2011-01-12 12:13:14 +0000 | [diff] [blame] | 2193 | tx_queue->stats.tx_packets++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2194 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2195 | txbdp = txbdp_start = tx_queue->cur_tx; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2196 | lstatus = txbdp->lstatus; |
| 2197 | |
| 2198 | /* Time stamp insertion requires one additional TxBD */ |
| 2199 | if (unlikely(do_tstamp)) |
| 2200 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2201 | tx_queue->tx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2202 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2203 | if (nr_frags == 0) { |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2204 | if (unlikely(do_tstamp)) |
| 2205 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2206 | TXBD_INTERRUPT); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2207 | else |
| 2208 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2209 | } else { |
| 2210 | /* Place the fragment addresses and lengths into the TxBDs */ |
| 2211 | for (i = 0; i < nr_frags; i++) { |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2212 | unsigned int frag_len; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2213 | /* Point at the next BD, wrapping as needed */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2214 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2216 | frag_len = skb_frag_size(&skb_shinfo(skb)->frags[i]);
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2217 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2218 | lstatus = txbdp->lstatus | frag_len | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2219 | BD_LFLAG(TXBD_READY); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2220 | |
| 2221 | /* Handle the last BD specially */ |
| 2222 | if (i == nr_frags - 1) |
| 2223 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
| 2224 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2225 | bufaddr = skb_frag_dma_map(priv->dev, |
Ian Campbell | 2234a72 | 2011-08-29 23:18:29 +0000 | [diff] [blame] | 2226 | &skb_shinfo(skb)->frags[i], |
| 2227 | 0, |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2228 | frag_len, |
Ian Campbell | 2234a72 | 2011-08-29 23:18:29 +0000 | [diff] [blame] | 2229 | DMA_TO_DEVICE); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2230 | |
| 2231 | /* set the TxBD length and buffer pointer */ |
| 2232 | txbdp->bufPtr = bufaddr; |
| 2233 | txbdp->lstatus = lstatus; |
| 2234 | } |
| 2235 | |
| 2236 | lstatus = txbdp_start->lstatus; |
| 2237 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2239 | /* Add TxPAL between FCB and frame if required */ |
| 2240 | if (unlikely(do_tstamp)) { |
| 2241 | skb_push(skb, GMAC_TXPAL_LEN); |
| 2242 | memset(skb->data, 0, GMAC_TXPAL_LEN); |
| 2243 | } |
| 2244 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2245 | /* Add TxFCB if required */ |
| 2246 | if (fcb_len) { |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2247 | fcb = gfar_add_fcb(skb); |
Claudiu Manoil | 02d88fb | 2013-08-05 17:20:09 +0300 | [diff] [blame] | 2248 | lstatus |= BD_LFLAG(TXBD_TOE); |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2249 | } |
| 2250 | |
| 2251 | /* Set up checksumming */ |
| 2252 | if (do_csum) { |
| 2253 | gfar_tx_checksum(skb, fcb, fcb_len); |
Claudiu Manoil | 02d88fb | 2013-08-05 17:20:09 +0300 | [diff] [blame] | 2254 | |
| 2255 | if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || |
| 2256 | unlikely(gfar_csum_errata_76(priv, skb->len))) { |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 2257 | __skb_pull(skb, GMAC_FCB_LEN); |
| 2258 | skb_checksum_help(skb); |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2259 | if (do_vlan || do_tstamp) { |
| 2260 | /* put back a new fcb for vlan/tstamp TOE */ |
| 2261 | fcb = gfar_add_fcb(skb); |
| 2262 | } else { |
| 2263 | /* Tx TOE not used */ |
| 2264 | lstatus &= ~(BD_LFLAG(TXBD_TOE)); |
| 2265 | fcb = NULL; |
| 2266 | } |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 2267 | } |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2268 | } |
| 2269 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2270 | if (do_vlan) |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2271 | gfar_tx_vlan(skb, fcb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2272 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2273 | /* Setup tx hardware time stamping if requested */ |
| 2274 | if (unlikely(do_tstamp)) { |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2275 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2276 | fcb->ptp = 1; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2277 | } |
| 2278 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2279 | txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2280 | skb_headlen(skb), DMA_TO_DEVICE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2281 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2282 | /* If time stamping is requested, one additional TxBD must be set up. The
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2283 | * first TxBD points to the FCB and must have a data length of |
| 2284 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with |
| 2285 | * the full frame length. |
| 2286 | */ |
| 2287 | if (unlikely(do_tstamp)) { |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2288 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2289 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2290 | (skb_headlen(skb) - fcb_len); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2291 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
| 2292 | } else { |
| 2293 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
| 2294 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2295 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2296 | netdev_tx_sent_queue(txq, bytes_sent); |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2297 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2298 | /* We can work in parallel with gfar_clean_tx_ring(), except |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2299 | * when modifying num_txbdfree. Note that we didn't grab the lock |
| 2300 | * when we were reading the num_txbdfree and checking for available |
| 2301 | * space, that's because outside of this function it can only grow, |
| 2302 | * and once we've got needed space, it cannot suddenly disappear. |
| 2303 | * |
| 2304 | * The lock also protects us from gfar_error(), which can modify |
| 2305 | * regs->tstat and thus retrigger the transfers, which is why we |
| 2306 | * also must grab the lock before setting ready bit for the first |
| 2307 | * to be transmitted BD. |
| 2308 | */ |
| 2309 | spin_lock_irqsave(&tx_queue->txlock, flags); |
| 2310 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2311 | /* The powerpc-specific eieio() is used, as wmb() has too strong |
Scott Wood | 3b6330c | 2007-05-16 15:06:59 -0500 | [diff] [blame] | 2312 | * semantics (it requires synchronization between cacheable and |
| 2313 | * uncacheable mappings, which eieio doesn't provide and which we |
| 2314 | * don't need), thus requiring a more expensive sync instruction. At |
| 2315 | * some point, the set of architecture-independent barrier functions |
| 2316 | * should be expanded to include weaker barriers. |
| 2317 | */ |
Scott Wood | 3b6330c | 2007-05-16 15:06:59 -0500 | [diff] [blame] | 2318 | eieio(); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2319 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2320 | txbdp_start->lstatus = lstatus; |
| 2321 | |
Anton Vorontsov | 0eddba5 | 2010-03-03 08:18:58 +0000 | [diff] [blame] | 2322 | eieio(); /* force lstatus write before tx_skbuff */ |
| 2323 | |
| 2324 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
| 2325 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2326 | /* Update the current skb pointer to the next entry we will use |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2327 | * (wrapping if necessary) |
| 2328 | */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2329 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2330 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2331 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2332 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2333 | |
| 2334 | /* reduce TxBD free count */ |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2335 | tx_queue->num_txbdfree -= nr_txbds;
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | |
| 2337 | /* If the next BD still needs to be cleaned up, then the bds |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2338 | * are full. We need to tell the kernel to stop sending us stuff. |
| 2339 | */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2340 | if (!tx_queue->num_txbdfree) { |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2341 | netif_tx_stop_queue(txq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2342 | |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 2343 | dev->stats.tx_fifo_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2344 | } |
| 2345 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2346 | /* Tell the DMA to go go go */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2347 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2348 | |
| 2349 | /* Unlock priv */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2350 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2351 | |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2352 | return NETDEV_TX_OK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2353 | } |
| 2354 | |
| 2355 | /* Stops the kernel queue and halts the controller */
| 2356 | static int gfar_close(struct net_device *dev) |
| 2357 | { |
| 2358 | struct gfar_private *priv = netdev_priv(dev); |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2359 | |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2360 | cancel_work_sync(&priv->reset_task); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2361 | stop_gfar(dev); |
| 2362 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 2363 | /* Disconnect from the PHY */ |
| 2364 | phy_disconnect(priv->phydev); |
| 2365 | priv->phydev = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2366 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 2367 | gfar_free_irq(priv); |
| 2368 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2369 | return 0; |
| 2370 | } |
| 2371 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2372 | /* Changes the mac address if the controller is not running. */ |
Andy Fleming | f162b9d | 2008-05-02 13:00:30 -0500 | [diff] [blame] | 2373 | static int gfar_set_mac_address(struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2374 | { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2375 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2376 | |
| 2377 | return 0; |
| 2378 | } |
| 2379 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2380 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
| 2381 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2382 | struct gfar_private *priv = netdev_priv(dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2383 | int frame_size = new_mtu + ETH_HLEN; |
| 2384 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2385 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2386 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2387 | return -EINVAL; |
| 2388 | } |
| 2389 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2390 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
| 2391 | cpu_relax(); |
| 2392 | |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 2393 | if (dev->flags & IFF_UP) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2394 | stop_gfar(dev); |
| 2395 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2396 | dev->mtu = new_mtu; |
| 2397 | |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 2398 | if (dev->flags & IFF_UP) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2399 | startup_gfar(dev); |
| 2400 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2401 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
| 2402 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2403 | return 0; |
| 2404 | } |
| 2405 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2406 | void reset_gfar(struct net_device *ndev) |
| 2407 | { |
| 2408 | struct gfar_private *priv = netdev_priv(ndev); |
| 2409 | |
| 2410 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
| 2411 | cpu_relax(); |
| 2412 | |
| 2413 | stop_gfar(ndev); |
| 2414 | startup_gfar(ndev); |
| 2415 | |
| 2416 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
| 2417 | } |
| 2418 | |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2419 | /* gfar_reset_task gets scheduled when a packet has not been |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2420 | * transmitted after a set amount of time. |
| 2421 | * For now, assume that clearing out all the structures and
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2422 | * starting over will fix the problem. |
| 2423 | */ |
| 2424 | static void gfar_reset_task(struct work_struct *work) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2425 | { |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2426 | struct gfar_private *priv = container_of(work, struct gfar_private, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2427 | reset_task); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2428 | reset_gfar(priv->ndev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2429 | } |
| 2430 | |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2431 | static void gfar_timeout(struct net_device *dev) |
| 2432 | { |
| 2433 | struct gfar_private *priv = netdev_priv(dev); |
| 2434 | |
| 2435 | dev->stats.tx_errors++; |
| 2436 | schedule_work(&priv->reset_task); |
| 2437 | } |
| 2438 | |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2439 | static void gfar_align_skb(struct sk_buff *skb) |
| 2440 | { |
| 2441 | /* We need the data buffer to be aligned; reserve as many
| 2442 | * bytes as it takes to push skb->data to the next RXBUF_ALIGNMENT boundary
| 2443 | */ |
| 2444 | skb_reserve(skb, RXBUF_ALIGNMENT - |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2445 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2446 | } |
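A worked example of the reservation above, assuming RXBUF_ALIGNMENT is 64 (its value in gianfar.h):

/* skb->data == ...0x1010: 0x1010 & 63 == 0x10, so 64 - 0x10 = 48 bytes
 * are reserved and skb->data lands on ...0x1040, a 64-byte boundary.
 * Note an already aligned buffer still gets a full 64-byte reserve,
 * which is why gfar_alloc_skb() below over-allocates by RXBUF_ALIGNMENT.
 */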
| 2447 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2448 | /* Interrupt Handler for Transmit complete */ |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2449 | static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2450 | { |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2451 | struct net_device *dev = tx_queue->dev; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2452 | struct netdev_queue *txq; |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2453 | struct gfar_private *priv = netdev_priv(dev); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2454 | struct txbd8 *bdp, *next = NULL; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2455 | struct txbd8 *lbdp = NULL; |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2456 | struct txbd8 *base = tx_queue->tx_bd_base; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2457 | struct sk_buff *skb; |
| 2458 | int skb_dirtytx; |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2459 | int tx_ring_size = tx_queue->tx_ring_size; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2460 | int frags = 0, nr_txbds = 0; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2461 | int i; |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2462 | int howmany = 0; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2463 | int tqi = tx_queue->qindex; |
| 2464 | unsigned int bytes_sent = 0; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2465 | u32 lstatus; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2466 | size_t buflen; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2467 | |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2468 | txq = netdev_get_tx_queue(dev, tqi); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2469 | bdp = tx_queue->dirty_tx; |
| 2470 | skb_dirtytx = tx_queue->skb_dirtytx; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2471 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2472 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2473 | unsigned long flags; |
| 2474 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2475 | frags = skb_shinfo(skb)->nr_frags; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2476 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2477 | /* When time stamping, one additional TxBD must be freed. |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2478 | * Also, we need to dma_unmap_single() the TxPAL. |
| 2479 | */ |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2480 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2481 | nr_txbds = frags + 2; |
| 2482 | else |
| 2483 | nr_txbds = frags + 1; |
| 2484 | |
| 2485 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2486 | |
| 2487 | lstatus = lbdp->lstatus; |
| 2488 | |
| 2489 | /* Only clean completed frames */ |
| 2490 | if ((lstatus & BD_LFLAG(TXBD_READY)) && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2491 | (lstatus & BD_LENGTH_MASK)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | break; |
| 2493 | |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2494 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2495 | next = next_txbd(bdp, base, tx_ring_size); |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2496 | buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2497 | } else |
| 2498 | buflen = bdp->length; |
| 2499 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2500 | dma_unmap_single(priv->dev, bdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2501 | buflen, DMA_TO_DEVICE); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2502 | |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2503 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2504 | struct skb_shared_hwtstamps shhwtstamps; |
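/* Assumption: the controller wrote the Tx timestamp into the padding
 * in front of the frame; locate it 16 bytes past skb->data, rounded
 * down to an 8-byte boundary.
 */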
| 2505 | u64 *ns = (u64 *)(((unsigned long)skb->data + 0x10) & ~0x7UL);
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2506 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2507 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
| 2508 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2509 | skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2510 | skb_tstamp_tx(skb, &shhwtstamps); |
| 2511 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2512 | bdp = next; |
| 2513 | } |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2514 | |
| 2515 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2516 | bdp = next_txbd(bdp, base, tx_ring_size); |
| 2517 | |
| 2518 | for (i = 0; i < frags; i++) { |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2519 | dma_unmap_page(priv->dev, bdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2520 | bdp->length, DMA_TO_DEVICE); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2521 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2522 | bdp = next_txbd(bdp, base, tx_ring_size); |
| 2523 | } |
| 2524 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2525 | bytes_sent += GFAR_CB(skb)->bytes_sent; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2526 | |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2527 | dev_kfree_skb_any(skb); |
Andy Fleming | 0fd56bb | 2009-02-04 16:43:16 -0800 | [diff] [blame] | 2528 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2529 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2530 | |
| 2531 | skb_dirtytx = (skb_dirtytx + 1) & |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2532 | TX_RING_MOD_MASK(tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2533 | |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2534 | howmany++; |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2535 | spin_lock_irqsave(&tx_queue->txlock, flags); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2536 | tx_queue->num_txbdfree += nr_txbds; |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2537 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2538 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2539 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2540 | /* If we freed a buffer, we can restart transmission, if necessary */ |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2541 | if (tx_queue->num_txbdfree && |
| 2542 | netif_tx_queue_stopped(txq) && |
| 2543 | !(test_bit(GFAR_DOWN, &priv->state))) |
| 2544 | netif_wake_subqueue(priv->ndev, tqi); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2545 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2546 | /* Update dirty indicators */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2547 | tx_queue->skb_dirtytx = skb_dirtytx; |
| 2548 | tx_queue->dirty_tx = bdp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2549 | |
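/* BQL bookkeeping: the bytes completed below were reported via
 * netdev_tx_sent_queue() in gfar_start_xmit(); anything still queued
 * at teardown is dropped by netdev_tx_reset_queue() in
 * free_skb_resources().
 */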
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2550 | netdev_tx_completed_queue(txq, howmany, bytes_sent); |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2551 | } |
| 2552 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2553 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2554 | struct sk_buff *skb) |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2555 | { |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2556 | struct net_device *dev = rx_queue->dev; |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2557 | struct gfar_private *priv = netdev_priv(dev); |
Anton Vorontsov | 8a102fe | 2009-10-12 06:00:37 +0000 | [diff] [blame] | 2558 | dma_addr_t buf; |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2559 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2560 | buf = dma_map_single(priv->dev, skb->data, |
Anton Vorontsov | 8a102fe | 2009-10-12 06:00:37 +0000 | [diff] [blame] | 2561 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2562 | gfar_init_rxbdp(rx_queue, bdp, buf); |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2563 | } |
| 2564 | |
Jan Ceuleers | 2281a0f | 2012-06-05 03:42:11 +0000 | [diff] [blame] | 2565 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2566 | { |
| 2567 | struct gfar_private *priv = netdev_priv(dev); |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2568 | struct sk_buff *skb; |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2569 | |
| 2570 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
| 2571 | if (!skb) |
| 2572 | return NULL; |
| 2573 | |
| 2574 | gfar_align_skb(skb); |
| 2575 | |
| 2576 | return skb; |
| 2577 | } |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2578 | |
Jan Ceuleers | 2281a0f | 2012-06-05 03:42:11 +0000 | [diff] [blame] | 2579 | struct sk_buff *gfar_new_skb(struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2580 | { |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2581 | return gfar_alloc_skb(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2582 | } |
| 2583 | |
Li Yang | 298e1a9 | 2007-10-16 14:18:13 +0800 | [diff] [blame] | 2584 | static inline void count_errors(unsigned short status, struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2585 | { |
Li Yang | 298e1a9 | 2007-10-16 14:18:13 +0800 | [diff] [blame] | 2586 | struct gfar_private *priv = netdev_priv(dev); |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 2587 | struct net_device_stats *stats = &dev->stats; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2588 | struct gfar_extra_stats *estats = &priv->extra_stats; |
| 2589 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2590 | /* If the packet was truncated, none of the other errors matter */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2591 | if (status & RXBD_TRUNCATED) { |
| 2592 | stats->rx_length_errors++; |
| 2593 | |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2594 | atomic64_inc(&estats->rx_trunc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2595 | |
| 2596 | return; |
| 2597 | } |
| 2598 | /* Count the errors, if there were any */ |
| 2599 | if (status & (RXBD_LARGE | RXBD_SHORT)) { |
| 2600 | stats->rx_length_errors++; |
| 2601 | |
| 2602 | if (status & RXBD_LARGE) |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2603 | atomic64_inc(&estats->rx_large); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2604 | else |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2605 | atomic64_inc(&estats->rx_short); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2606 | } |
| 2607 | if (status & RXBD_NONOCTET) { |
| 2608 | stats->rx_frame_errors++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2609 | atomic64_inc(&estats->rx_nonoctet); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2610 | } |
| 2611 | if (status & RXBD_CRCERR) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2612 | atomic64_inc(&estats->rx_crcerr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2613 | stats->rx_crc_errors++; |
| 2614 | } |
| 2615 | if (status & RXBD_OVERRUN) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2616 | atomic64_inc(&estats->rx_overrun); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2617 | stats->rx_over_errors++;
| 2618 | } |
| 2619 | } |
| 2620 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 2621 | irqreturn_t gfar_receive(int irq, void *grp_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2622 | { |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2623 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
| 2624 | unsigned long flags; |
| 2625 | u32 imask; |
| 2626 | |
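/* Mask further Rx interrupts while NAPI polls; the poll routine is
 * expected to re-enable them once the Rx rings have been drained.
 */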
| 2627 | if (likely(napi_schedule_prep(&grp->napi_rx))) { |
| 2628 | spin_lock_irqsave(&grp->grplock, flags); |
| 2629 | imask = gfar_read(&grp->regs->imask); |
| 2630 | imask &= IMASK_RX_DISABLED; |
| 2631 | gfar_write(&grp->regs->imask, imask); |
| 2632 | spin_unlock_irqrestore(&grp->grplock, flags); |
| 2633 | __napi_schedule(&grp->napi_rx); |
| 2634 | } else { |
| 2635 | /* Clear IEVENT, so the interrupt isn't raised again
| 2636 | * because of the packets that have already arrived. |
| 2637 | */ |
| 2638 | gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); |
| 2639 | } |
| 2640 | |
| 2641 | return IRQ_HANDLED; |
| 2642 | } |
| 2643 | |
| 2644 | /* Interrupt Handler for Transmit complete */ |
| 2645 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
| 2646 | { |
| 2647 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
| 2648 | unsigned long flags; |
| 2649 | u32 imask; |
| 2650 | |
| 2651 | if (likely(napi_schedule_prep(&grp->napi_tx))) { |
| 2652 | spin_lock_irqsave(&grp->grplock, flags); |
| 2653 | imask = gfar_read(&grp->regs->imask); |
| 2654 | imask &= IMASK_TX_DISABLED; |
| 2655 | gfar_write(&grp->regs->imask, imask); |
| 2656 | spin_unlock_irqrestore(&grp->grplock, flags); |
| 2657 | __napi_schedule(&grp->napi_tx); |
| 2658 | } else { |
| 2659 | /* Clear IEVENT, so interrupts aren't called again |
| 2660 | * because of the packets that have already arrived. |
| 2661 | */ |
| 2662 | gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); |
| 2663 | } |
| 2664 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2665 | return IRQ_HANDLED; |
| 2666 | } |
| 2667 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2668 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
| 2669 | { |
| 2670 | /* If valid headers were found, and valid sums |
| 2671 | * were verified, then we tell the kernel that no |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2672 | * checksumming is necessary. Otherwise, mark the skb CHECKSUM_NONE so the stack verifies it
| 2673 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2674 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2675 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 2676 | else |
Eric Dumazet | bc8acf2 | 2010-09-02 13:07:41 -0700 | [diff] [blame] | 2677 | skb_checksum_none_assert(skb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2678 | } |
| 2679 | |
| 2680 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2681 | /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
Claudiu Manoil | 61db26c | 2013-02-14 05:00:05 +0000 | [diff] [blame] | 2682 | static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
| 2683 | int amount_pull, struct napi_struct *napi) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2684 | { |
| 2685 | struct gfar_private *priv = netdev_priv(dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2686 | struct rxfcb *fcb = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2687 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2688 | /* the FCB, if present, is at the beginning of the frame data */
| 2689 | fcb = (struct rxfcb *)skb->data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2690 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2691 | /* Remove the FCB from the skb |
| 2692 | * Remove the padded bytes, if there are any |
| 2693 | */ |
Sandeep Gopalpet | f74dac0 | 2009-12-24 03:13:06 +0000 | [diff] [blame] | 2694 | if (amount_pull) { |
| 2695 | skb_record_rx_queue(skb, fcb->rq); |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2696 | skb_pull(skb, amount_pull); |
Sandeep Gopalpet | f74dac0 | 2009-12-24 03:13:06 +0000 | [diff] [blame] | 2697 | } |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2698 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 2699 | /* Get receive timestamp from the skb */ |
| 2700 | if (priv->hwts_rx_en) { |
| 2701 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
| 2702 | u64 *ns = (u64 *) skb->data; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2703 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 2704 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
| 2705 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); |
| 2706 | } |
| 2707 | |
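	/* With Rx timestamping enabled, the controller inserts the 8-byte
	 * nanosecond timestamp ahead of the frame data; priv->padding
	 * accounts for it, so the pull below strips it along with any
	 * alignment padding.
	 */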
| 2708 | if (priv->padding) |
| 2709 | skb_pull(skb, priv->padding); |
| 2710 | |
Michał Mirosław | 8b3afe9 | 2011-04-15 04:50:50 +0000 | [diff] [blame] | 2711 | if (dev->features & NETIF_F_RXCSUM) |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2712 | gfar_rx_checksum(skb, fcb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2713 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2714 | /* Tell the skb what kind of packet this is */ |
| 2715 | skb->protocol = eth_type_trans(skb, dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2716 | |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2717 | /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
David S. Miller | 823dcd2 | 2011-08-20 10:39:12 -0700 | [diff] [blame] | 2718 | * Even if vlan rx accel is disabled, on some chips |
| 2719 | * RXFCB_VLN is pseudo randomly set. |
| 2720 | */ |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2721 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && |
David S. Miller | 823dcd2 | 2011-08-20 10:39:12 -0700 | [diff] [blame] | 2722 | fcb->flags & RXFCB_VLN) |
David S. Miller | e5905c8 | 2013-04-22 19:24:19 -0400 | [diff] [blame] | 2723 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); |
Jiri Pirko | 87c288c | 2011-07-20 04:54:19 +0000 | [diff] [blame] | 2724 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2725 | /* Send the packet up the stack */ |
Claudiu Manoil | 953d276 | 2013-03-21 03:12:15 +0000 | [diff] [blame] | 2726 | napi_gro_receive(napi, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2727 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2728 | } |
| 2729 | |
| 2730 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring |
Jan Ceuleers | 2281a0f | 2012-06-05 03:42:11 +0000 | [diff] [blame] | 2731 | * until the budget/quota has been reached. Returns the number |
| 2732 | * of frames handled |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2733 | */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2734 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2735 | { |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2736 | struct net_device *dev = rx_queue->dev; |
Andy Fleming | 31de198 | 2008-12-16 15:33:40 -0800 | [diff] [blame] | 2737 | struct rxbd8 *bdp, *base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2738 | struct sk_buff *skb; |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2739 | int pkt_len; |
| 2740 | int amount_pull; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2741 | int howmany = 0; |
| 2742 | struct gfar_private *priv = netdev_priv(dev); |
| 2743 | |
| 2744 | /* Get the first full descriptor */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2745 | bdp = rx_queue->cur_rx; |
| 2746 | base = rx_queue->rx_bd_base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2747 | |
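	/* uses_rxfcb is set when Rx csum offload, VLAN extraction or Rx
	 * timestamping is enabled; each frame then starts with a Frame
	 * Control Block that must be pulled before the data.
	 */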
Claudiu Manoil | ba77971 | 2013-02-14 05:00:07 +0000 | [diff] [blame] | 2748 | amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2749 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2750 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2751 | struct sk_buff *newskb; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2752 | |
Scott Wood | 3b6330c | 2007-05-16 15:06:59 -0500 | [diff] [blame] | 2753 | rmb(); |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2754 | |
| 2755 | /* Add another skb for the future */ |
| 2756 | newskb = gfar_new_skb(dev); |
| 2757 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2758 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2759 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2760 | dma_unmap_single(priv->dev, bdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2761 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
Andy Fleming | 8118305 | 2008-11-12 10:07:11 -0600 | [diff] [blame] | 2762 | |
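		/* No error was flagged, but the frame is longer than the
		 * buffer: force an oversize error so the frame is dropped.
		 */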
Anton Vorontsov | 63b88b9 | 2010-06-11 10:51:03 +0000 | [diff] [blame] | 2763 | if (unlikely(!(bdp->status & RXBD_ERR) && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2764 | bdp->length > priv->rx_buffer_size)) |
Anton Vorontsov | 63b88b9 | 2010-06-11 10:51:03 +0000 | [diff] [blame] | 2765 | bdp->status = RXBD_LARGE; |
| 2766 | |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2767 | /* We drop the frame if we failed to allocate a new buffer */ |
| 2768 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2769 | bdp->status & RXBD_ERR)) { |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2770 | count_errors(bdp->status, dev); |
| 2771 | |
| 2772 | if (unlikely(!newskb)) |
| 2773 | newskb = skb; |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2774 | else if (skb) |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2775 | dev_kfree_skb(skb); |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2776 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2777 | /* Increment the number of packets */ |
Sandeep Gopalpet | a7f3804 | 2009-12-16 01:15:07 +0000 | [diff] [blame] | 2778 | rx_queue->stats.rx_packets++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2779 | howmany++; |
| 2780 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2781 | if (likely(skb)) { |
| 2782 | /* Remove the FCS from the packet length */
| 2783 | pkt_len = bdp->length - ETH_FCS_LEN;
| 2784 | skb_put(skb, pkt_len); |
Sandeep Gopalpet | a7f3804 | 2009-12-16 01:15:07 +0000 | [diff] [blame] | 2785 | rx_queue->stats.rx_bytes += pkt_len; |
Sandeep Gopalpet | f74dac0 | 2009-12-24 03:13:06 +0000 | [diff] [blame] | 2786 | skb_record_rx_queue(skb, rx_queue->qindex); |
Wu Jiajun-B06378 | cd754a5 | 2012-04-19 22:54:35 +0000 | [diff] [blame] | 2787 | gfar_process_frame(dev, skb, amount_pull, |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2788 | &rx_queue->grp->napi_rx); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2789 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2790 | } else { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2791 | netif_warn(priv, rx_err, dev, "Missing skb!\n"); |
Sandeep Gopalpet | a7f3804 | 2009-12-16 01:15:07 +0000 | [diff] [blame] | 2792 | rx_queue->stats.rx_dropped++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2793 | atomic64_inc(&priv->extra_stats.rx_skbmissing); |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2794 | } |
| 2795 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2796 | } |
| 2797 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2798 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2799 | |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2800 | /* Setup the new bdp */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2801 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2802 | |
| 2803 | /* Update to the next pointer */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2804 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2805 | |
| 2806 | /* update to point at the next skb */ |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2807 | rx_queue->skb_currx = (rx_queue->skb_currx + 1) & |
| 2808 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2809 | } |
| 2810 | |
| 2811 | /* Update the current rxbd pointer to be the next one */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2812 | rx_queue->cur_rx = bdp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2813 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2814 | return howmany; |
| 2815 | } |
| 2816 | |
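/* The NAPI poll routines come in two flavors: the _sq ("single queue")
 * versions below service one Rx/Tx queue per interrupt group and skip
 * the rstat bitmap scan and per-queue budget split that the multi-queue
 * gfar_poll_rx()/gfar_poll_tx() variants have to perform.
 */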
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2817 | static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2818 | { |
| 2819 | struct gfar_priv_grp *gfargrp = |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2820 | container_of(napi, struct gfar_priv_grp, napi_rx); |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2821 | struct gfar __iomem *regs = gfargrp->regs; |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2822 | struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; |
| 2823 | int work_done = 0; |
| 2824 | |
| 2825 | /* Clear IEVENT, so interrupts aren't called again |
| 2826 | * because of the packets that have already arrived |
| 2827 | */ |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2828 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2829 | |
| 2830 | work_done = gfar_clean_rx_ring(rx_queue, budget); |
| 2831 | |
| 2832 | if (work_done < budget) { |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2833 | u32 imask; |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2834 | napi_complete(napi); |
| 2835 | /* Clear the halt bit in RSTAT */ |
| 2836 | gfar_write(®s->rstat, gfargrp->rstat); |
| 2837 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2838 | spin_lock_irq(&gfargrp->grplock); |
| 2839 | imask = gfar_read(®s->imask); |
| 2840 | imask |= IMASK_RX_DEFAULT; |
| 2841 | gfar_write(®s->imask, imask); |
| 2842 | spin_unlock_irq(&gfargrp->grplock); |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2843 | } |
| 2844 | |
| 2845 | return work_done; |
| 2846 | } |
| 2847 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2848 | static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2849 | { |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2850 | struct gfar_priv_grp *gfargrp = |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2851 | container_of(napi, struct gfar_priv_grp, napi_tx); |
| 2852 | struct gfar __iomem *regs = gfargrp->regs; |
| 2853 | struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; |
| 2854 | u32 imask; |
| 2855 | |
| 2856 | /* Clear IEVENT, so interrupts aren't called again |
| 2857 | * because of the packets that have already arrived |
| 2858 | */ |
| 2859 | gfar_write(®s->ievent, IEVENT_TX_MASK); |
| 2860 | |
| 2861 | /* run Tx cleanup to completion */ |
| 2862 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) |
| 2863 | gfar_clean_tx_ring(tx_queue); |
| 2864 | |
| 2865 | napi_complete(napi); |
| 2866 | |
| 2867 | spin_lock_irq(&gfargrp->grplock); |
| 2868 | imask = gfar_read(®s->imask); |
| 2869 | imask |= IMASK_TX_DEFAULT; |
| 2870 | gfar_write(®s->imask, imask); |
| 2871 | spin_unlock_irq(&gfargrp->grplock); |
| 2872 | |
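	/* Tx cleanup is not budgeted by NAPI convention; the work above
	 * ran to completion, so report zero work done.
	 */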
| 2873 | return 0; |
| 2874 | } |
| 2875 | |
| 2876 | static int gfar_poll_rx(struct napi_struct *napi, int budget) |
| 2877 | { |
| 2878 | struct gfar_priv_grp *gfargrp = |
| 2879 | container_of(napi, struct gfar_priv_grp, napi_rx); |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2880 | struct gfar_private *priv = gfargrp->priv; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2881 | struct gfar __iomem *regs = gfargrp->regs; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2882 | struct gfar_priv_rx_q *rx_queue = NULL; |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2883 | int work_done = 0, work_done_per_q = 0; |
Claudiu Manoil | 39c0a0d | 2013-03-21 03:12:13 +0000 | [diff] [blame] | 2884 | int i, budget_per_q = 0; |
Claudiu Manoil | 6be5ed3 | 2013-03-19 07:40:03 +0000 | [diff] [blame] | 2885 | unsigned long rstat_rxf; |
| 2886 | int num_act_queues; |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2887 | |
Dai Haruki | 8c7396a | 2008-12-17 16:52:00 -0800 | [diff] [blame] | 2888 | /* Clear IEVENT, so interrupts aren't called again |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2889 | * because of the packets that have already arrived |
| 2890 | */ |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2891 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
Dai Haruki | 8c7396a | 2008-12-17 16:52:00 -0800 | [diff] [blame] | 2892 | |
Claudiu Manoil | 6be5ed3 | 2013-03-19 07:40:03 +0000 | [diff] [blame] | 2893 | rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; |
| 2894 | |
| 2895 | num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); |
| 2896 | if (num_act_queues) |
| 2897 | budget_per_q = budget/num_act_queues; |
| 2898 | |
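	/* Service only the queues whose RXF bit is set in rstat, splitting
	 * the budget evenly between them.
	 */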
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2899 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
| 2900 | /* skip queue if not active */ |
| 2901 | if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) |
| 2902 | continue; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2903 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2904 | rx_queue = priv->rx_queue[i]; |
| 2905 | work_done_per_q = |
| 2906 | gfar_clean_rx_ring(rx_queue, budget_per_q); |
| 2907 | work_done += work_done_per_q; |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2908 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2909 | /* finished processing this queue */ |
| 2910 | if (work_done_per_q < budget_per_q) { |
| 2911 | /* clear active queue hw indication */ |
| 2912 | gfar_write(®s->rstat, |
| 2913 | RSTAT_CLEAR_RXF0 >> i); |
| 2914 | num_act_queues--; |
Claudiu Manoil | 6be5ed3 | 2013-03-19 07:40:03 +0000 | [diff] [blame] | 2915 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2916 | if (!num_act_queues) |
| 2917 | break; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2918 | } |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2919 | } |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2920 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2921 | if (!num_act_queues) { |
| 2922 | u32 imask; |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2923 | napi_complete(napi); |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2924 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2925 | /* Clear the halt bit in RSTAT */ |
| 2926 | gfar_write(®s->rstat, gfargrp->rstat); |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2927 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2928 | spin_lock_irq(&gfargrp->grplock); |
| 2929 | imask = gfar_read(®s->imask); |
| 2930 | imask |= IMASK_RX_DEFAULT; |
| 2931 | gfar_write(®s->imask, imask); |
| 2932 | spin_unlock_irq(&gfargrp->grplock); |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2933 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2934 | |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2935 | return work_done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2936 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2937 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame^] | 2938 | static int gfar_poll_tx(struct napi_struct *napi, int budget) |
| 2939 | { |
| 2940 | struct gfar_priv_grp *gfargrp = |
| 2941 | container_of(napi, struct gfar_priv_grp, napi_tx); |
| 2942 | struct gfar_private *priv = gfargrp->priv; |
| 2943 | struct gfar __iomem *regs = gfargrp->regs; |
| 2944 | struct gfar_priv_tx_q *tx_queue = NULL; |
| 2945 | int has_tx_work = 0; |
| 2946 | int i; |
| 2947 | |
| 2948 | /* Clear IEVENT, so interrupts aren't called again |
| 2949 | * because of the packets that have already arrived |
| 2950 | */ |
| 2951 | gfar_write(®s->ievent, IEVENT_TX_MASK); |
| 2952 | |
| 2953 | for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { |
| 2954 | tx_queue = priv->tx_queue[i]; |
| 2955 | /* run Tx cleanup to completion */ |
| 2956 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { |
| 2957 | gfar_clean_tx_ring(tx_queue); |
| 2958 | has_tx_work = 1; |
| 2959 | } |
| 2960 | } |
| 2961 | |
| 2962 | if (!has_tx_work) { |
| 2963 | u32 imask; |
| 2964 | napi_complete(napi); |
| 2965 | |
| 2966 | spin_lock_irq(&gfargrp->grplock); |
| 2967 | imask = gfar_read(®s->imask); |
| 2968 | imask |= IMASK_TX_DEFAULT; |
| 2969 | gfar_write(®s->imask, imask); |
| 2970 | spin_unlock_irq(&gfargrp->grplock); |
| 2971 | } |
| 2972 | |
| 2973 | return 0; |
| 2974 | } |
| 2975 | |
| 2976 | |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 2977 | #ifdef CONFIG_NET_POLL_CONTROLLER |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2978 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 2979 | * without having to re-enable interrupts. It's not called while |
| 2980 | * the interrupt routine is executing. |
| 2981 | */ |
| 2982 | static void gfar_netpoll(struct net_device *dev) |
| 2983 | { |
| 2984 | struct gfar_private *priv = netdev_priv(dev); |
Jan Ceuleers | 3a2e16c | 2012-06-05 03:42:14 +0000 | [diff] [blame] | 2985 | int i; |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 2986 | |
| 2987 | /* If the device has multiple interrupts, run tx/rx */ |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 2988 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2989 | for (i = 0; i < priv->num_grps; i++) { |
Paul Gortmaker | 62ed839 | 2013-02-24 05:38:31 +0000 | [diff] [blame] | 2990 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
| 2991 | |
| 2992 | disable_irq(gfar_irq(grp, TX)->irq); |
| 2993 | disable_irq(gfar_irq(grp, RX)->irq); |
| 2994 | disable_irq(gfar_irq(grp, ER)->irq); |
| 2995 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
| 2996 | enable_irq(gfar_irq(grp, ER)->irq); |
| 2997 | enable_irq(gfar_irq(grp, RX)->irq); |
| 2998 | enable_irq(gfar_irq(grp, TX)->irq); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2999 | } |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3000 | } else { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3001 | for (i = 0; i < priv->num_grps; i++) { |
Paul Gortmaker | 62ed839 | 2013-02-24 05:38:31 +0000 | [diff] [blame] | 3002 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
| 3003 | |
| 3004 | disable_irq(gfar_irq(grp, TX)->irq); |
| 3005 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
| 3006 | enable_irq(gfar_irq(grp, TX)->irq); |
Anton Vorontsov | 43de004 | 2009-12-09 02:52:19 -0800 | [diff] [blame] | 3007 | } |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3008 | } |
| 3009 | } |
| 3010 | #endif |
| 3011 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3012 | /* The interrupt handler for devices with one interrupt */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3013 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3014 | { |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3015 | struct gfar_priv_grp *gfargrp = grp_id; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3016 | |
| 3017 | /* Save ievent for future reference */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3018 | u32 events = gfar_read(&gfargrp->regs->ievent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3019 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3020 | /* Check for reception */ |
Sergei Shtylyov | 538cc7e | 2007-02-15 17:56:01 +0400 | [diff] [blame] | 3021 | if (events & IEVENT_RX_MASK) |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3022 | gfar_receive(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3023 | |
| 3024 | /* Check for transmit completion */ |
Sergei Shtylyov | 538cc7e | 2007-02-15 17:56:01 +0400 | [diff] [blame] | 3025 | if (events & IEVENT_TX_MASK) |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3026 | gfar_transmit(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3027 | |
Sergei Shtylyov | 538cc7e | 2007-02-15 17:56:01 +0400 | [diff] [blame] | 3028 | /* Check for errors */ |
| 3029 | if (events & IEVENT_ERR_MASK) |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3030 | gfar_error(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3031 | |
| 3032 | return IRQ_HANDLED; |
| 3033 | } |
| 3034 | |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 3035 | static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) |
| 3036 | { |
| 3037 | struct phy_device *phydev = priv->phydev; |
| 3038 | u32 val = 0; |
| 3039 | |
| 3040 | if (!phydev->duplex) |
| 3041 | return val; |
| 3042 | |
| 3043 | if (!priv->pause_aneg_en) { |
| 3044 | if (priv->tx_pause_en) |
| 3045 | val |= MACCFG1_TX_FLOW; |
| 3046 | if (priv->rx_pause_en) |
| 3047 | val |= MACCFG1_RX_FLOW; |
| 3048 | } else { |
| 3049 | u16 lcl_adv, rmt_adv; |
| 3050 | u8 flowctrl; |
| 3051 | /* get link partner capabilities */ |
| 3052 | rmt_adv = 0; |
| 3053 | if (phydev->pause) |
| 3054 | rmt_adv = LPA_PAUSE_CAP; |
| 3055 | if (phydev->asym_pause) |
| 3056 | rmt_adv |= LPA_PAUSE_ASYM; |
| 3057 | |
| 3058 | lcl_adv = mii_advertise_flowctrl(phydev->advertising); |
| 3059 | |
| 3060 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); |
| 3061 | if (flowctrl & FLOW_CTRL_TX) |
| 3062 | val |= MACCFG1_TX_FLOW; |
| 3063 | if (flowctrl & FLOW_CTRL_RX) |
| 3064 | val |= MACCFG1_RX_FLOW; |
| 3065 | } |
| 3066 | |
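	/* e.g. if both link partners advertise symmetric pause, the
	 * resolution above yields FLOW_CTRL_TX | FLOW_CTRL_RX and both
	 * MACCFG1 flow control bits end up set.
	 */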
| 3067 | return val; |
| 3068 | } |
| 3069 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3070 | /* Called every time the controller might need to be made |
| 3071 | * aware of new link state. The PHY code conveys this |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3072 | * information through variables in the phydev structure, and this |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3073 | * function converts those variables into the appropriate |
| 3074 | * register values, and can bring down the device if needed. |
| 3075 | */ |
| 3076 | static void adjust_link(struct net_device *dev) |
| 3077 | { |
| 3078 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3079 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3080 | struct phy_device *phydev = priv->phydev; |
| 3081 | int new_state = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3082 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 3083 | if (test_bit(GFAR_RESETTING, &priv->state)) |
| 3084 | return; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 3085 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3086 | if (phydev->link) { |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 3087 | u32 tempval1 = gfar_read(®s->maccfg1); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3088 | u32 tempval = gfar_read(®s->maccfg2); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3089 | u32 ecntrl = gfar_read(®s->ecntrl); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3090 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3091 | /* Now we make sure that we can be in full duplex mode. |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3092 | * If not, we operate in half-duplex mode. |
| 3093 | */ |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3094 | if (phydev->duplex != priv->oldduplex) { |
| 3095 | new_state = 1; |
| 3096 | if (!(phydev->duplex)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3097 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3098 | else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3099 | tempval |= MACCFG2_FULL_DUPLEX; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3100 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3101 | priv->oldduplex = phydev->duplex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3102 | } |
| 3103 | |
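		/* A speed change means reprogramming MACCFG2's interface
		 * mode field: GMII/TBI for gigabit, MII for 10/100, with
		 * ECNTRL_R100 picking 100 vs 10 Mbit in the reduced modes.
		 */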
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3104 | if (phydev->speed != priv->oldspeed) { |
| 3105 | new_state = 1; |
| 3106 | switch (phydev->speed) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3107 | case 1000: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | tempval = |
| 3109 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); |
Li Yang | f430e49 | 2009-01-06 14:08:10 -0800 | [diff] [blame] | 3110 | |
| 3111 | ecntrl &= ~(ECNTRL_R100); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3112 | break; |
| 3113 | case 100: |
| 3114 | case 10: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3115 | tempval = |
| 3116 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3117 | |
| 3118 | /* Reduced mode distinguishes |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3119 | * between 10 and 100 |
| 3120 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3121 | if (phydev->speed == SPEED_100) |
| 3122 | ecntrl |= ECNTRL_R100; |
| 3123 | else |
| 3124 | ecntrl &= ~(ECNTRL_R100); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3125 | break; |
| 3126 | default: |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3127 | netif_warn(priv, link, dev, |
| 3128 | "Ack! Speed (%d) is not 10/100/1000!\n", |
| 3129 | phydev->speed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3130 | break; |
| 3131 | } |
| 3132 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3133 | priv->oldspeed = phydev->speed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3134 | } |
| 3135 | |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 3136 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
| 3137 | tempval1 |= gfar_get_flowctrl_cfg(priv); |
| 3138 | |
| 3139 | gfar_write(®s->maccfg1, tempval1); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3140 | gfar_write(®s->maccfg2, tempval); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3141 | gfar_write(®s->ecntrl, ecntrl); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3142 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3143 | if (!priv->oldlink) { |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3144 | new_state = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3145 | priv->oldlink = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3146 | } |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3147 | } else if (priv->oldlink) { |
| 3148 | new_state = 1; |
| 3149 | priv->oldlink = 0; |
| 3150 | priv->oldspeed = 0; |
| 3151 | priv->oldduplex = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3152 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3153 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3154 | if (new_state && netif_msg_link(priv)) |
| 3155 | phy_print_status(phydev); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3156 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3157 | |
| 3158 | /* Update the hash table based on the current list of multicast |
| 3159 | * addresses we subscribe to. Also, change the promiscuity of |
| 3160 | * the device based on the flags (this function is called |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3161 | * whenever dev->flags is changed |
| 3162 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3163 | static void gfar_set_multi(struct net_device *dev) |
| 3164 | { |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3165 | struct netdev_hw_addr *ha; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3166 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3167 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3168 | u32 tempval; |
| 3169 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 3170 | if (dev->flags & IFF_PROMISC) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3171 | /* Set RCTRL to PROM */ |
| 3172 | tempval = gfar_read(®s->rctrl); |
| 3173 | tempval |= RCTRL_PROM; |
| 3174 | gfar_write(®s->rctrl, tempval); |
| 3175 | } else { |
| 3176 | /* Set RCTRL to not PROM */ |
| 3177 | tempval = gfar_read(®s->rctrl); |
| 3178 | tempval &= ~(RCTRL_PROM); |
| 3179 | gfar_write(®s->rctrl, tempval); |
| 3180 | } |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 3181 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 3182 | if (dev->flags & IFF_ALLMULTI) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3183 | /* Set the hash to rx all multicast frames */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3184 | gfar_write(®s->igaddr0, 0xffffffff); |
| 3185 | gfar_write(®s->igaddr1, 0xffffffff); |
| 3186 | gfar_write(®s->igaddr2, 0xffffffff); |
| 3187 | gfar_write(®s->igaddr3, 0xffffffff); |
| 3188 | gfar_write(®s->igaddr4, 0xffffffff); |
| 3189 | gfar_write(®s->igaddr5, 0xffffffff); |
| 3190 | gfar_write(®s->igaddr6, 0xffffffff); |
| 3191 | gfar_write(®s->igaddr7, 0xffffffff); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3192 | gfar_write(®s->gaddr0, 0xffffffff); |
| 3193 | gfar_write(®s->gaddr1, 0xffffffff); |
| 3194 | gfar_write(®s->gaddr2, 0xffffffff); |
| 3195 | gfar_write(®s->gaddr3, 0xffffffff); |
| 3196 | gfar_write(®s->gaddr4, 0xffffffff); |
| 3197 | gfar_write(®s->gaddr5, 0xffffffff); |
| 3198 | gfar_write(®s->gaddr6, 0xffffffff); |
| 3199 | gfar_write(®s->gaddr7, 0xffffffff); |
| 3200 | } else { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3201 | int em_num; |
| 3202 | int idx; |
| 3203 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3204 | /* zero out the hash */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3205 | gfar_write(®s->igaddr0, 0x0); |
| 3206 | gfar_write(®s->igaddr1, 0x0); |
| 3207 | gfar_write(®s->igaddr2, 0x0); |
| 3208 | gfar_write(®s->igaddr3, 0x0); |
| 3209 | gfar_write(®s->igaddr4, 0x0); |
| 3210 | gfar_write(®s->igaddr5, 0x0); |
| 3211 | gfar_write(®s->igaddr6, 0x0); |
| 3212 | gfar_write(®s->igaddr7, 0x0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3213 | gfar_write(®s->gaddr0, 0x0); |
| 3214 | gfar_write(®s->gaddr1, 0x0); |
| 3215 | gfar_write(®s->gaddr2, 0x0); |
| 3216 | gfar_write(®s->gaddr3, 0x0); |
| 3217 | gfar_write(®s->gaddr4, 0x0); |
| 3218 | gfar_write(®s->gaddr5, 0x0); |
| 3219 | gfar_write(®s->gaddr6, 0x0); |
| 3220 | gfar_write(®s->gaddr7, 0x0); |
| 3221 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3222 | /* If we have extended hash tables, we need to |
| 3223 | * clear the exact match registers to prepare for |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3224 | * setting them |
| 3225 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3226 | if (priv->extended_hash) { |
| 3227 | em_num = GFAR_EM_NUM + 1; |
| 3228 | gfar_clear_exact_match(dev); |
| 3229 | idx = 1; |
| 3230 | } else { |
| 3231 | idx = 0; |
| 3232 | em_num = 0; |
| 3233 | } |
| 3234 | |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 3235 | if (netdev_mc_empty(dev)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3236 | return; |
| 3237 | |
| 3238 | /* Parse the list, and set the appropriate bits */ |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3239 | netdev_for_each_mc_addr(ha, dev) { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3240 | if (idx < em_num) { |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3241 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3242 | idx++; |
| 3243 | } else |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3244 | gfar_set_hash_for_addr(dev, ha->addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3245 | } |
| 3246 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3247 | } |
| 3248 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3249 | |
| 3250 | /* Clears each of the exact match registers to zero, so they |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3251 | * don't interfere with normal reception |
| 3252 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3253 | static void gfar_clear_exact_match(struct net_device *dev) |
| 3254 | { |
| 3255 | int idx; |
Joe Perches | 6a3c910c | 2011-11-16 09:38:02 +0000 | [diff] [blame] | 3256 | static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3257 | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 3258 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
Joe Perches | b6bc765 | 2010-12-21 02:16:08 -0800 | [diff] [blame] | 3259 | gfar_set_mac_for_addr(dev, idx, zero_arr); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3260 | } |
| 3261 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3262 | /* Set the appropriate hash bit for the given addr */ |
| 3263 | /* The algorithm works like so: |
| 3264 | * 1) Take the Destination Address (ie the multicast address), and |
| 3265 | * do a CRC on it (little endian), and reverse the bits of the |
| 3266 | * result. |
| 3267 | * 2) Use the 8 most significant bits as a hash into a 256-entry |
| 3268 | * table. The table is controlled through 8 32-bit registers: |
| 3269 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is |
| 3270 | * entry 255. This means that the 3 most significant bits of the
| 3271 | * hash index select which gaddr register to use, and the 5 other bits
| 3272 | * indicate which bit (assuming an IBM numbering scheme, which |
| 3273 | * for PowerPC (tm) is usually the case) in the register holds |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3274 | * the entry. |
| 3275 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3276 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) |
| 3277 | { |
| 3278 | u32 tempval; |
| 3279 | struct gfar_private *priv = netdev_priv(dev); |
Joe Perches | 6a3c910c | 2011-11-16 09:38:02 +0000 | [diff] [blame] | 3280 | u32 result = ether_crc(ETH_ALEN, addr); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3281 | int width = priv->hash_width; |
| 3282 | u8 whichbit = (result >> (32 - width)) & 0x1f; |
| 3283 | u8 whichreg = result >> (32 - width + 5); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3284 | u32 value = (1 << (31-whichbit)); |
| 3285 | |
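	/* Illustration, assuming hash_width == 8: a CRC of 0xd5000000
	 * gives hash 0xd5 = 0b11010101, so whichreg = 0b110 = gaddr6 and
	 * whichbit = 0b10101 = 21, i.e. the mask 1 << (31 - 21).
	 */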
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3286 | tempval = gfar_read(priv->hash_regs[whichreg]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3287 | tempval |= value; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3288 | gfar_write(priv->hash_regs[whichreg], tempval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3289 | } |
| 3290 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3291 | |
| 3292 | /* There are multiple MAC Address register pairs on some controllers |
| 3293 | * This function sets the numth pair to a given address |
| 3294 | */ |
Joe Perches | b6bc765 | 2010-12-21 02:16:08 -0800 | [diff] [blame] | 3295 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
| 3296 | const u8 *addr) |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3297 | { |
| 3298 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3299 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3300 | int idx; |
Joe Perches | 6a3c910c | 2011-11-16 09:38:02 +0000 | [diff] [blame] | 3301 | char tmpbuf[ETH_ALEN + 2] = { 0 }; /* padded: a full u32 is read from tmpbuf + 4 below */
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3302 | u32 tempval; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3303 | u32 __iomem *macptr = ®s->macstnaddr1; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3304 | |
| 3305 | macptr += num*2; |
| 3306 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3307 | /* Now copy it into the mac registers backwards, since
| 3308 | * the hardware holds the station address in reversed byte order
| 3309 | */
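	/* Illustration (big-endian CPU): for 00:04:9f:01:02:03 tmpbuf
	 * becomes {03,02,01,9f,04,00}, so MACSTNADDR1 is written with
	 * 0x0302019f and the last two octets land in the upper half of
	 * MACSTNADDR2.
	 */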
Joe Perches | 6a3c910c | 2011-11-16 09:38:02 +0000 | [diff] [blame] | 3310 | for (idx = 0; idx < ETH_ALEN; idx++) |
| 3311 | tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3312 | |
| 3313 | gfar_write(macptr, *((u32 *) (tmpbuf))); |
| 3314 | |
| 3315 | tempval = *((u32 *) (tmpbuf + 4)); |
| 3316 | |
| 3317 | gfar_write(macptr+1, tempval); |
| 3318 | } |
| 3319 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3320 | /* GFAR error interrupt handler */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3321 | static irqreturn_t gfar_error(int irq, void *grp_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3322 | { |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3323 | struct gfar_priv_grp *gfargrp = grp_id; |
| 3324 | struct gfar __iomem *regs = gfargrp->regs; |
| 3325 | struct gfar_private *priv = gfargrp->priv;
| 3326 | struct net_device *dev = priv->ndev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3327 | |
| 3328 | /* Save ievent for future reference */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3329 | u32 events = gfar_read(®s->ievent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3330 | |
| 3331 | /* Clear IEVENT */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3332 | gfar_write(®s->ievent, events & IEVENT_ERR_MASK); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 3333 | |
| 3334 | /* Magic Packet is not an error. */ |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3335 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 3336 | (events & IEVENT_MAG)) |
| 3337 | events &= ~IEVENT_MAG; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3338 | |
| 3339 | /* Dump the offending events if error debugging is enabled */
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3340 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 3341 | netdev_dbg(dev, |
| 3342 | "error interrupt (ievent=0x%08x imask=0x%08x)\n", |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3343 | events, gfar_read(®s->imask)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3344 | |
| 3345 | /* Update the error counters */ |
| 3346 | if (events & IEVENT_TXE) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3347 | dev->stats.tx_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3348 | |
| 3349 | if (events & IEVENT_LC) |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3350 | dev->stats.tx_window_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3351 | if (events & IEVENT_CRL) |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3352 | dev->stats.tx_aborted_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3353 | if (events & IEVENT_XFUN) { |
Anton Vorontsov | 836cf7f | 2009-11-10 14:11:08 +0000 | [diff] [blame] | 3354 | unsigned long flags; |
| 3355 | |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3356 | netif_dbg(priv, tx_err, dev, |
| 3357 | "TX FIFO underrun, packet dropped\n"); |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3358 | dev->stats.tx_dropped++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3359 | atomic64_inc(&priv->extra_stats.tx_underrun); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3360 | |
Anton Vorontsov | 836cf7f | 2009-11-10 14:11:08 +0000 | [diff] [blame] | 3361 | local_irq_save(flags); |
| 3362 | lock_tx_qs(priv); |
| 3363 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3364 | /* Reactivate the Tx Queues */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 3365 | gfar_write(®s->tstat, gfargrp->tstat); |
Anton Vorontsov | 836cf7f | 2009-11-10 14:11:08 +0000 | [diff] [blame] | 3366 | |
| 3367 | unlock_tx_qs(priv); |
| 3368 | local_irq_restore(flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3369 | } |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3370 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3371 | } |
| 3372 | if (events & IEVENT_BSY) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3373 | dev->stats.rx_errors++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3374 | atomic64_inc(&priv->extra_stats.rx_bsy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3375 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3376 | gfar_receive(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3377 | |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3378 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
| 3379 | gfar_read(®s->rstat)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3380 | } |
| 3381 | if (events & IEVENT_BABR) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3382 | dev->stats.rx_errors++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3383 | atomic64_inc(&priv->extra_stats.rx_babr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3384 | |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3385 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3386 | } |
| 3387 | if (events & IEVENT_EBERR) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3388 | atomic64_inc(&priv->extra_stats.eberr); |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3389 | netif_dbg(priv, rx_err, dev, "bus error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3390 | } |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3391 | if (events & IEVENT_RXC) |
| 3392 | netif_dbg(priv, rx_status, dev, "control frame\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3393 | |
| 3394 | if (events & IEVENT_BABT) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3395 | atomic64_inc(&priv->extra_stats.tx_babt); |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3396 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3397 | } |
| 3398 | return IRQ_HANDLED; |
| 3399 | } |
| 3400 | |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3401 | static struct of_device_id gfar_match[] = |
| 3402 | { |
| 3403 | { |
| 3404 | .type = "network", |
| 3405 | .compatible = "gianfar", |
| 3406 | }, |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3407 | { |
| 3408 | .compatible = "fsl,etsec2", |
| 3409 | }, |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3410 | {}, |
| 3411 | }; |
Anton Vorontsov | e72701a | 2009-10-14 14:54:52 -0700 | [diff] [blame] | 3412 | MODULE_DEVICE_TABLE(of, gfar_match); |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3413 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3414 | /* Structure for a device driver */ |
Grant Likely | 7488876 | 2011-02-22 21:05:51 -0700 | [diff] [blame] | 3415 | static struct platform_driver gfar_driver = { |
Grant Likely | 4018294 | 2010-04-13 16:13:02 -0700 | [diff] [blame] | 3416 | .driver = { |
| 3417 | .name = "fsl-gianfar", |
| 3418 | .owner = THIS_MODULE, |
| 3419 | .pm = GFAR_PM_OPS, |
| 3420 | .of_match_table = gfar_match, |
| 3421 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3422 | .probe = gfar_probe, |
| 3423 | .remove = gfar_remove, |
| 3424 | }; |
| 3425 | |
Axel Lin | db62f68 | 2011-11-27 16:44:17 +0000 | [diff] [blame] | 3426 | module_platform_driver(gfar_driver); |