/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx families of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device.  Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or a set amount of time has passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
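
/* A rough, illustrative sketch of the Rx processing described above (not
 * the driver's actual code; the loop shape and names are simplified).  The
 * real implementation is gfar_clean_rx_ring():
 *
 *	bdp = rx_queue->cur_rx;
 *	while (!(bdp->status & RXBD_EMPTY) && work_done < budget) {
 *		hand the attached skb up the stack;
 *		attach a freshly allocated skb and mark the BD empty again;
 *		bdp = (bdp->status & RXBD_WRAP) ? rx_queue->rx_bd_base
 *						: bdp + 1;
 *		work_done++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */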

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

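/* Attach a DMA buffer to an Rx buffer descriptor and return it to the
 * hardware: mark it empty (and as the wrap descriptor if it is the last BD
 * of the ring), with a write barrier before lstatus is updated so the
 * controller never sees a half-initialized descriptor.
 */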
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = lstatus;
}

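/* (Re)initialize the Tx and Rx descriptor rings: reset the software ring
 * state, clear the Tx BDs and set the wrap bit on the last one, and make
 * sure every Rx BD has an skb attached and is marked empty.  Each Rx queue
 * also records its free buffer pointer register (rfbptr) for later flow
 * control updates.
 */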
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	u32 *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}

	return 0;
}

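/* Allocate a single DMA-coherent region holding all Tx and Rx descriptor
 * rings back to back, carve it up between the queues, allocate the per-ring
 * skbuff pointer arrays, and finally populate the rings via gfar_init_bds().
 */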
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

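/* Program the TBASEn/RBASEn registers with the DMA base address of each
 * queue's descriptor ring (the registers live in 64-bit slots, hence the
 * stride of two u32s).
 */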
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

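/* Set the per-queue RQPRMn registers: the ring size plus the default free
 * buffer threshold used for lossless flow control (LFC).
 */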
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

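/* Derive the Rx buffer size from the MTU: account for the Ethernet header
 * and FCS, the frame control block (FCB) when any Rx hardware offload is in
 * use, and the alignment padding, then round up to the next
 * INCREMENTAL_BUFFER_SIZE boundary.
 */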
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}

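/* Build and program RCTRL from the current device settings: Rx filer,
 * promiscuous mode, checksum offload, extended hashing, padding alignment,
 * hardware timestamping, VLAN extraction and lossless flow control.
 */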
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

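/* Build and program TCTRL: Tx checksum offload, the Tx scheduling mode
 * (priority-based or weighted round-robin) and VLAN insertion.
 */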
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

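/* Program the interrupt coalescing registers for the queues selected in
 * tx_mask/rx_mask.  In single-group mode only the one TXIC/RXIC register
 * pair exists, regardless of how many queues are enabled.
 */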
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

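/* ndo_get_stats: fold the per-queue software counters into the netdev
 * stats structure.
 */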
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

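/* Parse one interrupt group node from the device tree: map its register
 * block, grab its interrupt lines, and work out which Rx/Tx queues the
 * group services (from the fsl,rx/tx-bit-map properties or the defaults).
 */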
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 *rxq_mask, *txq_mask;
		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		} else { /* GFAR_MQ_POLLING */
			grp->rx_bit_map = rxq_mask ?
			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = txq_mask ?
			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
	 * from right to left, so we need to reverse the 8 bits to get the
	 * q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

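/* Extract the device configuration from the device tree: queue and group
 * counts, interrupt groups, stashing parameters, MAC address, capability
 * flags, PHY connection type and the PHY/TBI handles.  Also allocates the
 * net_device and the software queue structures.
 */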
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;
	unsigned short mode, poll_mode;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	/* parse the num of HW tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = of_get_available_child_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			num_tx_qs = tx_queues ? *tx_queues : 1;
			num_rx_qs = rx_queues ? *rx_queues : 1;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

Ben Hutchings | ca0c88c | 2013-11-18 23:05:27 +0000 | [diff] [blame] | 948 | static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 949 | { |
| 950 | struct hwtstamp_config config; |
| 951 | struct gfar_private *priv = netdev_priv(netdev); |
| 952 | |
| 953 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) |
| 954 | return -EFAULT; |
| 955 | |
| 956 | /* reserved for future extensions */ |
| 957 | if (config.flags) |
| 958 | return -EINVAL; |
| 959 | |
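| | 	/* Hardware Tx timestamping can only be enabled on devices that
| | 	 * have the 1588 timer block (FSL_GIANFAR_DEV_HAS_TIMER).
| | 	 */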
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 960 | switch (config.tx_type) { |
| 961 | case HWTSTAMP_TX_OFF: |
| 962 | priv->hwts_tx_en = 0; |
| 963 | break; |
| 964 | case HWTSTAMP_TX_ON: |
| 965 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) |
| 966 | return -ERANGE; |
| 967 | priv->hwts_tx_en = 1; |
| 968 | break; |
| 969 | default: |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 970 | return -ERANGE; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 971 | } |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 972 | |
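| | 	/* Rx timestamping is all-or-nothing: any filter other than NONE is
| | 	 * upgraded to HWTSTAMP_FILTER_ALL, and toggling the setting resets
| | 	 * the controller so that it takes effect.
| | 	 */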
| 973 | switch (config.rx_filter) { |
| 974 | case HWTSTAMP_FILTER_NONE: |
Manfred Rudigier | 97553f7 | 2010-06-11 01:49:05 +0000 | [diff] [blame] | 975 | if (priv->hwts_rx_en) { |
Manfred Rudigier | 97553f7 | 2010-06-11 01:49:05 +0000 | [diff] [blame] | 976 | priv->hwts_rx_en = 0; |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 977 | reset_gfar(netdev); |
Manfred Rudigier | 97553f7 | 2010-06-11 01:49:05 +0000 | [diff] [blame] | 978 | } |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 979 | break; |
| 980 | default: |
| 981 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) |
| 982 | return -ERANGE; |
Manfred Rudigier | 97553f7 | 2010-06-11 01:49:05 +0000 | [diff] [blame] | 983 | if (!priv->hwts_rx_en) { |
Manfred Rudigier | 97553f7 | 2010-06-11 01:49:05 +0000 | [diff] [blame] | 984 | priv->hwts_rx_en = 1; |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 985 | reset_gfar(netdev); |
Manfred Rudigier | 97553f7 | 2010-06-11 01:49:05 +0000 | [diff] [blame] | 986 | } |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 987 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
| 988 | break; |
| 989 | } |
| 990 | |
| 991 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? |
| 992 | -EFAULT : 0; |
| 993 | } |
| 994 | |
Ben Hutchings | ca0c88c | 2013-11-18 23:05:27 +0000 | [diff] [blame] | 995 | static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) |
| 996 | { |
| 997 | struct hwtstamp_config config; |
| 998 | struct gfar_private *priv = netdev_priv(netdev); |
| 999 | |
| 1000 | config.flags = 0; |
| 1001 | config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; |
| 1002 | config.rx_filter = (priv->hwts_rx_en ? |
| 1003 | HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); |
| 1004 | |
| 1005 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? |
| 1006 | -EFAULT : 0; |
| 1007 | } |
| 1008 | |
Clifford Wolf | 0faac9f | 2009-01-09 10:23:11 +0000 | [diff] [blame] | 1009 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
| 1010 | { |
| 1011 | struct gfar_private *priv = netdev_priv(dev); |
| 1012 | |
| 1013 | if (!netif_running(dev)) |
| 1014 | return -EINVAL; |
| 1015 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 1016 | if (cmd == SIOCSHWTSTAMP) |
Ben Hutchings | ca0c88c | 2013-11-18 23:05:27 +0000 | [diff] [blame] | 1017 | return gfar_hwtstamp_set(dev, rq); |
| 1018 | if (cmd == SIOCGHWTSTAMP) |
| 1019 | return gfar_hwtstamp_get(dev, rq); |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 1020 | |
Clifford Wolf | 0faac9f | 2009-01-09 10:23:11 +0000 | [diff] [blame] | 1021 | if (!priv->phydev) |
| 1022 | return -ENODEV; |
| 1023 | |
Richard Cochran | 28b0411 | 2010-07-17 08:48:55 +0000 | [diff] [blame] | 1024 | return phy_mii_ioctl(priv->phydev, rq, cmd); |
Clifford Wolf | 0faac9f | 2009-01-09 10:23:11 +0000 | [diff] [blame] | 1025 | } |
| 1026 | |
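| | /* Program a cluster of four filer entries for the given frame class,
| |  * filling the table backwards from rqfar and mirroring each entry in the
| |  * ftp_rqfcr/ftp_rqfpr shadow arrays.  Returns the next free (lower) index.
| |  */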
Anton Vorontsov | 18294ad | 2009-11-04 12:53:00 +0000 | [diff] [blame] | 1027 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, |
| 1028 | u32 class) |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1029 | { |
| 1030 | u32 rqfpr = FPR_FILER_MASK; |
| 1031 | u32 rqfcr = 0x0; |
| 1032 | |
| 1033 | rqfar--; |
| 1034 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1035 | priv->ftp_rqfpr[rqfar] = rqfpr; |
| 1036 | priv->ftp_rqfcr[rqfar] = rqfcr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1037 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
| 1038 | |
| 1039 | rqfar--; |
| 1040 | rqfcr = RQFCR_CMP_NOMATCH; |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1041 | priv->ftp_rqfpr[rqfar] = rqfpr; |
| 1042 | priv->ftp_rqfcr[rqfar] = rqfcr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1043 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
| 1044 | |
| 1045 | rqfar--; |
| 1046 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; |
| 1047 | rqfpr = class; |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1048 | priv->ftp_rqfcr[rqfar] = rqfcr; |
| 1049 | priv->ftp_rqfpr[rqfar] = rqfpr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1050 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
| 1051 | |
| 1052 | rqfar--; |
| 1053 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; |
| 1054 | rqfpr = class; |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1055 | priv->ftp_rqfcr[rqfar] = rqfcr; |
| 1056 | priv->ftp_rqfpr[rqfar] = rqfpr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1057 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
| 1058 | |
| 1059 | return rqfar; |
| 1060 | } |
| 1061 | |
| 1062 | static void gfar_init_filer_table(struct gfar_private *priv) |
| 1063 | { |
| 1064 | int i = 0x0; |
| 1065 | u32 rqfar = MAX_FILER_IDX; |
| 1066 | u32 rqfcr = 0x0; |
| 1067 | u32 rqfpr = FPR_FILER_MASK; |
| 1068 | |
| 1069 | /* Default rule */ |
| 1070 | rqfcr = RQFCR_CMP_MATCH; |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1071 | priv->ftp_rqfcr[rqfar] = rqfcr; |
| 1072 | priv->ftp_rqfpr[rqfar] = rqfpr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1073 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
| 1074 | |
| 1075 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); |
| 1076 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); |
| 1077 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); |
| 1078 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); |
| 1079 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); |
| 1080 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); |
| 1081 | |
Uwe Kleine-König | 85dd08e | 2010-06-11 12:16:55 +0200 | [diff] [blame] | 1082 | /* cur_filer_idx indicates the first non-masked rule */
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1083 | priv->cur_filer_idx = rqfar; |
| 1084 | |
| 1085 | /* Rest are masked rules */ |
| 1086 | rqfcr = RQFCR_CMP_NOMATCH; |
| 1087 | for (i = 0; i < rqfar; i++) { |
Wu Jiajun-B06378 | 6c43e04 | 2011-06-07 21:46:51 +0000 | [diff] [blame] | 1088 | priv->ftp_rqfcr[i] = rqfcr; |
| 1089 | priv->ftp_rqfpr[i] = rqfpr; |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1090 | gfar_write_filer(priv, i, rqfcr, rqfpr); |
| 1091 | } |
| 1092 | } |
| 1093 | |
Claudiu Manoil | d6ef0bc | 2014-10-07 10:44:32 +0300 | [diff] [blame] | 1094 | #ifdef CONFIG_PPC |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1095 | static void __gfar_detect_errata_83xx(struct gfar_private *priv) |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1096 | { |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1097 | unsigned int pvr = mfspr(SPRN_PVR); |
| 1098 | unsigned int svr = mfspr(SPRN_SVR); |
| 1099 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ |
| 1100 | unsigned int rev = svr & 0xffff; |
| 1101 | |
| 1102 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ |
| 1103 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1104 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1105 | priv->errata |= GFAR_ERRATA_74; |
| 1106 | |
Anton Vorontsov | deb90ea | 2010-06-30 06:39:13 +0000 | [diff] [blame] | 1107 | /* MPC8313 and MPC837x all rev */ |
| 1108 | if ((pvr == 0x80850010 && mod == 0x80b0) || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1109 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
Anton Vorontsov | deb90ea | 2010-06-30 06:39:13 +0000 | [diff] [blame] | 1110 | priv->errata |= GFAR_ERRATA_76; |
| 1111 | |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1112 | /* MPC8313 Rev < 2.0 */ |
| 1113 | if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 1114 | priv->errata |= GFAR_ERRATA_12; |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1115 | } |
| 1116 | |
| 1117 | static void __gfar_detect_errata_85xx(struct gfar_private *priv) |
| 1118 | { |
| 1119 | unsigned int svr = mfspr(SPRN_SVR); |
| 1120 | |
| 1121 | if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) |
| 1122 | priv->errata |= GFAR_ERRATA_12; |
Claudiu Manoil | 53fad77 | 2013-10-09 20:20:42 +0300 | [diff] [blame] | 1123 | if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || |
| 1124 | ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) |
| 1125 | priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1126 | } |
Claudiu Manoil | d6ef0bc | 2014-10-07 10:44:32 +0300 | [diff] [blame] | 1127 | #endif |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1128 | |
| 1129 | static void gfar_detect_errata(struct gfar_private *priv) |
| 1130 | { |
| 1131 | struct device *dev = &priv->ofdev->dev; |
| 1132 | |
| 1133 | /* no plans to fix */ |
| 1134 | priv->errata |= GFAR_ERRATA_A002; |
| 1135 | |
Claudiu Manoil | d6ef0bc | 2014-10-07 10:44:32 +0300 | [diff] [blame] | 1136 | #ifdef CONFIG_PPC |
Claudiu Manoil | 2969b1f | 2013-10-09 20:20:41 +0300 | [diff] [blame] | 1137 | if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) |
| 1138 | __gfar_detect_errata_85xx(priv); |
| 1139 | else /* non-mpc85xx parts, i.e. e300 core based */ |
| 1140 | __gfar_detect_errata_83xx(priv); |
Claudiu Manoil | d6ef0bc | 2014-10-07 10:44:32 +0300 | [diff] [blame] | 1141 | #endif |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 1142 | |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1143 | if (priv->errata) |
| 1144 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", |
| 1145 | priv->errata); |
| 1146 | } |
| 1147 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1148 | void gfar_mac_reset(struct gfar_private *priv) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | { |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1150 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1151 | u32 tempval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | |
| 1153 | /* Reset MAC layer */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1154 | gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | |
Andy Fleming | b98ac70 | 2009-02-04 16:38:05 -0800 | [diff] [blame] | 1156 | /* We need to delay at least 3 TX clocks */ |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1157 | udelay(3); |
Andy Fleming | b98ac70 | 2009-02-04 16:38:05 -0800 | [diff] [blame] | 1158 | |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 1159 | /* the soft reset bit is not self-resetting, so we need to |
| 1160 | * clear it before resuming normal operation |
| 1161 | */ |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1162 | gfar_write(&regs->maccfg1, 0);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1163 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1164 | udelay(3); |
| 1165 | |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 1166 | /* Compute rx_buff_size based on config flags */ |
| 1167 | gfar_rx_buff_size_config(priv); |
| 1168 | |
| 1169 | /* Initialize the max receive frame/buffer lengths */ |
| 1170 | gfar_write(&regs->maxfrm, priv->rx_buffer_size);
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1171 | gfar_write(&regs->mrblr, priv->rx_buffer_size);
| 1172 | |
| 1173 | /* Initialize the Minimum Frame Length Register */ |
| 1174 | gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
| 1175 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | /* Initialize MACCFG2. */ |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1177 | tempval = MACCFG2_INIT_SETTINGS; |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 1178 | |
| 1179 | /* If the mtu is larger than the max size for standard |
| 1180 | * Ethernet frames (i.e., a jumbo frame), then set maccfg2
| 1181 | * to allow huge frames, and to check the length |
| 1182 | */ |
| 1183 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || |
| 1184 | gfar_has_errata(priv, GFAR_ERRATA_74)) |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1185 | tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 1186 | |
Anton Vorontsov | 7d35097 | 2010-06-30 06:39:12 +0000 | [diff] [blame] | 1187 | gfar_write(&regs->maccfg2, tempval);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1189 | /* Clear mac addr hash registers */ |
| 1190 | gfar_write(&regs->igaddr0, 0);
| 1191 | gfar_write(&regs->igaddr1, 0);
| 1192 | gfar_write(&regs->igaddr2, 0);
| 1193 | gfar_write(&regs->igaddr3, 0);
| 1194 | gfar_write(&regs->igaddr4, 0);
| 1195 | gfar_write(&regs->igaddr5, 0);
| 1196 | gfar_write(&regs->igaddr6, 0);
| 1197 | gfar_write(&regs->igaddr7, 0);
| 1198 | |
| 1199 | gfar_write(&regs->gaddr0, 0);
| 1200 | gfar_write(&regs->gaddr1, 0);
| 1201 | gfar_write(&regs->gaddr2, 0);
| 1202 | gfar_write(&regs->gaddr3, 0);
| 1203 | gfar_write(&regs->gaddr4, 0);
| 1204 | gfar_write(&regs->gaddr5, 0);
| 1205 | gfar_write(&regs->gaddr6, 0);
| 1206 | gfar_write(&regs->gaddr7, 0);
| 1207 | |
| 1208 | if (priv->extended_hash) |
| 1209 | gfar_clear_exact_match(priv->ndev); |
| 1210 | |
| 1211 | gfar_mac_rx_config(priv); |
| 1212 | |
| 1213 | gfar_mac_tx_config(priv); |
| 1214 | |
| 1215 | gfar_set_mac_address(priv->ndev); |
| 1216 | |
| 1217 | gfar_set_multi(priv->ndev); |
| 1218 | |
| 1219 | /* clear ievent and imask before configuring coalescing */ |
| 1220 | gfar_ints_disable(priv); |
| 1221 | |
| 1222 | /* Configure the coalescing support */ |
| 1223 | gfar_configure_coalescing_all(priv); |
| 1224 | } |
| 1225 | |
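| | /* One-time controller setup at probe time: stop any DMA left running by
| |  * the firmware, reset the MAC, clear the RMON MIB counters if present,
| |  * and program the attribute, FIFO and interrupt-steering defaults.
| |  */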
| 1226 | static void gfar_hw_init(struct gfar_private *priv) |
| 1227 | { |
| 1228 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 1229 | u32 attrs; |
| 1230 | |
| 1231 | /* Stop the DMA engine now, in case it was running before |
| 1232 | * (The firmware could have used it, and left it running). |
| 1233 | */ |
| 1234 | gfar_halt(priv); |
| 1235 | |
| 1236 | gfar_mac_reset(priv); |
| 1237 | |
| 1238 | /* Zero out the RMON MIB registers if the device has them */
| 1239 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
| 1240 | memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); |
| 1241 | |
| 1242 | /* Mask off the CAM interrupts */ |
| 1243 | gfar_write(&regs->rmon.cam1, 0xffffffff);
| 1244 | gfar_write(&regs->rmon.cam2, 0xffffffff);
| 1245 | } |
| 1246 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1247 | /* Initialize ECNTRL */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1248 | gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | |
Claudiu Manoil | 34018fd | 2014-02-17 12:53:15 +0200 | [diff] [blame] | 1250 | /* Set the extraction length and index */ |
| 1251 | attrs = ATTRELI_EL(priv->rx_stash_size) | |
| 1252 | ATTRELI_EI(priv->rx_stash_index); |
| 1253 | |
| 1254 | gfar_write(&regs->attreli, attrs);
| 1255 | |
| 1256 | /* Start with defaults, and add stashing |
| 1257 | * depending on driver parameters |
| 1258 | */ |
| 1259 | attrs = ATTR_INIT_SETTINGS; |
| 1260 | |
| 1261 | if (priv->bd_stash_en) |
| 1262 | attrs |= ATTR_BDSTASH; |
| 1263 | |
| 1264 | if (priv->rx_stash_size != 0) |
| 1265 | attrs |= ATTR_BUFSTASH; |
| 1266 | |
| 1267 | gfar_write(&regs->attr, attrs);
| 1268 | |
| 1269 | /* FIFO configs */ |
| 1270 | gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
| 1271 | gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
| 1272 | gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
| 1273 | |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1274 | /* Program the interrupt steering regs, only for MG devices */ |
| 1275 | if (priv->num_grps > 1) |
| 1276 | gfar_write_isrg(priv); |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1277 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | |
Xiubo Li | 898157e | 2014-06-04 16:49:16 +0800 | [diff] [blame] | 1279 | static void gfar_init_addr_hash_table(struct gfar_private *priv) |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1280 | { |
| 1281 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1282 | |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1283 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1284 | priv->extended_hash = 1; |
| 1285 | priv->hash_width = 9; |
| 1286 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1287 | priv->hash_regs[0] = &regs->igaddr0;
| 1288 | priv->hash_regs[1] = &regs->igaddr1;
| 1289 | priv->hash_regs[2] = &regs->igaddr2;
| 1290 | priv->hash_regs[3] = &regs->igaddr3;
| 1291 | priv->hash_regs[4] = &regs->igaddr4;
| 1292 | priv->hash_regs[5] = &regs->igaddr5;
| 1293 | priv->hash_regs[6] = &regs->igaddr6;
| 1294 | priv->hash_regs[7] = &regs->igaddr7;
| 1295 | priv->hash_regs[8] = &regs->gaddr0;
| 1296 | priv->hash_regs[9] = &regs->gaddr1;
| 1297 | priv->hash_regs[10] = &regs->gaddr2;
| 1298 | priv->hash_regs[11] = &regs->gaddr3;
| 1299 | priv->hash_regs[12] = &regs->gaddr4;
| 1300 | priv->hash_regs[13] = &regs->gaddr5;
| 1301 | priv->hash_regs[14] = &regs->gaddr6;
| 1302 | priv->hash_regs[15] = &regs->gaddr7;
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1303 | |
| 1304 | } else { |
| 1305 | priv->extended_hash = 0; |
| 1306 | priv->hash_width = 8; |
| 1307 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1308 | priv->hash_regs[0] = &regs->gaddr0;
| 1309 | priv->hash_regs[1] = &regs->gaddr1;
| 1310 | priv->hash_regs[2] = &regs->gaddr2;
| 1311 | priv->hash_regs[3] = &regs->gaddr3;
| 1312 | priv->hash_regs[4] = &regs->gaddr4;
| 1313 | priv->hash_regs[5] = &regs->gaddr5;
| 1314 | priv->hash_regs[6] = &regs->gaddr6;
| 1315 | priv->hash_regs[7] = &regs->gaddr7;
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1316 | } |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1317 | } |
| 1318 | |
| 1319 | /* Set up the ethernet device structure, private data, |
| 1320 | * and anything else we need before we start |
| 1321 | */ |
| 1322 | static int gfar_probe(struct platform_device *ofdev) |
| 1323 | { |
| 1324 | struct net_device *dev = NULL; |
| 1325 | struct gfar_private *priv = NULL; |
| 1326 | int err = 0, i; |
| 1327 | |
| 1328 | err = gfar_of_init(ofdev, &dev); |
| 1329 | |
| 1330 | if (err) |
| 1331 | return err; |
| 1332 | |
| 1333 | priv = netdev_priv(dev); |
| 1334 | priv->ndev = dev; |
| 1335 | priv->ofdev = ofdev; |
| 1336 | priv->dev = &ofdev->dev; |
| 1337 | SET_NETDEV_DEV(dev, &ofdev->dev); |
| 1338 | |
| 1339 | spin_lock_init(&priv->bflock); |
| 1340 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
| 1341 | |
| 1342 | platform_set_drvdata(ofdev, priv); |
| 1343 | |
| 1344 | gfar_detect_errata(priv); |
| 1345 | |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1346 | /* Set the dev->base_addr to the gfar reg region */ |
| 1347 | dev->base_addr = (unsigned long) priv->gfargrp[0].regs; |
| 1348 | |
| 1349 | /* Fill in the dev structure */ |
| 1350 | dev->watchdog_timeo = TX_TIMEOUT; |
| 1351 | dev->mtu = 1500; |
| 1352 | dev->netdev_ops = &gfar_netdev_ops; |
| 1353 | dev->ethtool_ops = &gfar_ethtool_ops; |
| 1354 | |
| 1355 | /* Register NAPI: each interrupt group gets its own Rx and Tx NAPI contexts */
Claudiu Manoil | 71ff9e3 | 2014-03-07 14:42:46 +0200 | [diff] [blame] | 1356 | for (i = 0; i < priv->num_grps; i++) { |
| 1357 | if (priv->poll_mode == GFAR_SQ_POLLING) { |
| 1358 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, |
| 1359 | gfar_poll_rx_sq, GFAR_DEV_WEIGHT); |
| 1360 | netif_napi_add(dev, &priv->gfargrp[i].napi_tx, |
| 1361 | gfar_poll_tx_sq, 2); |
| 1362 | } else { |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 1363 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, |
| 1364 | gfar_poll_rx, GFAR_DEV_WEIGHT); |
| 1365 | netif_napi_add(dev, &priv->gfargrp[i].napi_tx, |
| 1366 | gfar_poll_tx, 2); |
| 1367 | } |
| 1368 | } |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1369 | |
| 1370 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
| 1371 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
| 1372 | NETIF_F_RXCSUM; |
| 1373 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | |
| 1374 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; |
| 1375 | } |
| 1376 | |
| 1377 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { |
| 1378 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | |
| 1379 | NETIF_F_HW_VLAN_CTAG_RX; |
| 1380 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
| 1381 | } |
| 1382 | |
| 1383 | gfar_init_addr_hash_table(priv); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1384 | |
Claudiu Manoil | 532c37b | 2014-02-17 12:53:16 +0200 | [diff] [blame] | 1385 | /* Insert receive time stamps into padding alignment bytes */ |
| 1386 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
| 1387 | priv->padding = 8; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1388 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 1389 | if (dev->features & NETIF_F_IP_CSUM || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1390 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
Wu Jiajun-B06378 | bee9e58 | 2012-05-21 23:00:48 +0000 | [diff] [blame] | 1391 | dev->needed_headroom = GMAC_FCB_LEN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | |
| 1393 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1395 | /* Initializing some of the rx/tx queue level parameters */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1396 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 1397 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; |
| 1398 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; |
| 1399 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; |
| 1400 | priv->tx_queue[i]->txic = DEFAULT_TXIC; |
| 1401 | } |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1402 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1403 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1404 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; |
| 1405 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; |
| 1406 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; |
| 1407 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1409 | /* always enable rx filer */ |
Sebastian Poehn | 4aa3a71 | 2011-06-20 13:57:59 -0700 | [diff] [blame] | 1410 | priv->rx_filer_enable = 1; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1411 | /* Enable most messages by default */ |
| 1412 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
Claudiu Manoil | b98b8ba | 2012-09-23 22:39:08 +0000 | [diff] [blame] | 1413 | /* use priority h/w tx queue scheduling for single queue devices */
| 1414 | if (priv->num_tx_queues == 1) |
| 1415 | priv->prio_sched_en = 1; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1416 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1417 | set_bit(GFAR_DOWN, &priv->state); |
| 1418 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1419 | gfar_hw_init(priv); |
Trent Piepho | d3eab82 | 2008-10-02 11:12:24 +0000 | [diff] [blame] | 1420 | |
Fabio Estevam | d4c642e | 2014-06-03 19:55:38 -0300 | [diff] [blame] | 1421 | /* Carrier starts down, phylib will bring it up */ |
| 1422 | netif_carrier_off(dev); |
| 1423 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 | err = register_netdev(dev); |
| 1425 | |
| 1426 | if (err) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1427 | pr_err("%s: Cannot register net device, aborting\n", dev->name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | goto register_fail; |
| 1429 | } |
| 1430 | |
Anton Vorontsov | 2884e5c | 2009-02-01 00:52:34 -0800 | [diff] [blame] | 1431 | device_init_wakeup(&dev->dev, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1432 | priv->device_flags & |
| 1433 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
Anton Vorontsov | 2884e5c | 2009-02-01 00:52:34 -0800 | [diff] [blame] | 1434 | |
Dai Haruki | c50a5d9 | 2008-12-17 16:51:32 -0800 | [diff] [blame] | 1435 | /* fill out IRQ number and name fields */ |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1436 | for (i = 0; i < priv->num_grps; i++) { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1437 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1438 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1439 | sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", |
Joe Perches | 0015e55 | 2012-03-25 07:10:07 +0000 | [diff] [blame] | 1440 | dev->name, "_g", '0' + i, "_tx"); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1441 | sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", |
Joe Perches | 0015e55 | 2012-03-25 07:10:07 +0000 | [diff] [blame] | 1442 | dev->name, "_g", '0' + i, "_rx"); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1443 | sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", |
Joe Perches | 0015e55 | 2012-03-25 07:10:07 +0000 | [diff] [blame] | 1444 | dev->name, "_g", '0' + i, "_er"); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1445 | } else |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1446 | strcpy(gfar_irq(grp, TX)->name, dev->name); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1447 | } |
Dai Haruki | c50a5d9 | 2008-12-17 16:51:32 -0800 | [diff] [blame] | 1448 | |
Sandeep Gopalpet | 7a8b337 | 2009-11-02 07:03:40 +0000 | [diff] [blame] | 1449 | /* Initialize the filer table */ |
| 1450 | gfar_init_filer_table(priv); |
| 1451 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 | /* Print out the device info */ |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1453 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1455 | /* Even more device info helps when determining which kernel |
| 1456 | * provided which set of benchmarks. |
| 1457 | */ |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1458 | netdev_info(dev, "Running with NAPI enabled\n"); |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1459 | for (i = 0; i < priv->num_rx_queues; i++) |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1460 | netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", |
| 1461 | i, priv->rx_queue[i]->rx_ring_size); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1462 | for (i = 0; i < priv->num_tx_queues; i++) |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 1463 | netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", |
| 1464 | i, priv->tx_queue[i]->tx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1465 | |
| 1466 | return 0; |
| 1467 | |
| 1468 | register_fail: |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1469 | unmap_group_regs(priv); |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1470 | gfar_free_rx_queues(priv); |
| 1471 | gfar_free_tx_queues(priv); |
Uwe Kleine-König | 888c88b | 2014-08-07 21:20:12 +0200 | [diff] [blame] | 1472 | of_node_put(priv->phy_node); |
| 1473 | of_node_put(priv->tbi_node); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1474 | free_gfar_dev(priv); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1475 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | } |
| 1477 | |
Grant Likely | 2dc1158 | 2010-08-06 09:25:50 -0600 | [diff] [blame] | 1478 | static int gfar_remove(struct platform_device *ofdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | { |
Jingoo Han | 8513fbd | 2013-05-23 00:52:31 +0000 | [diff] [blame] | 1480 | struct gfar_private *priv = platform_get_drvdata(ofdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | |
Uwe Kleine-König | 888c88b | 2014-08-07 21:20:12 +0200 | [diff] [blame] | 1482 | of_node_put(priv->phy_node); |
| 1483 | of_node_put(priv->tbi_node); |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1484 | |
David S. Miller | d9d8e04 | 2009-09-06 01:41:02 -0700 | [diff] [blame] | 1485 | unregister_netdev(priv->ndev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1486 | unmap_group_regs(priv); |
Claudiu Manoil | 2086278 | 2014-02-17 12:53:14 +0200 | [diff] [blame] | 1487 | gfar_free_rx_queues(priv); |
| 1488 | gfar_free_tx_queues(priv); |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 1489 | free_gfar_dev(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1490 | |
| 1491 | return 0; |
| 1492 | } |
| 1493 | |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1494 | #ifdef CONFIG_PM |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1495 | |
| 1496 | static int gfar_suspend(struct device *dev) |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1497 | { |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1498 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 1499 | struct net_device *ndev = priv->ndev; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1500 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1501 | unsigned long flags; |
| 1502 | u32 tempval; |
| 1503 | |
| 1504 | int magic_packet = priv->wol_en && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1505 | (priv->device_flags & |
| 1506 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1507 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1508 | netif_device_detach(ndev); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1509 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1510 | if (netif_running(ndev)) { |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1511 | |
| 1512 | local_irq_save(flags); |
| 1513 | lock_tx_qs(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1514 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1515 | gfar_halt_nodisable(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1516 | |
| 1517 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1518 | tempval = gfar_read(&regs->maccfg1);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1519 | |
| 1520 | tempval &= ~MACCFG1_TX_EN; |
| 1521 | |
| 1522 | if (!magic_packet) |
| 1523 | tempval &= ~MACCFG1_RX_EN; |
| 1524 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1525 | gfar_write(&regs->maccfg1, tempval);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1526 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1527 | unlock_tx_qs(priv); |
| 1528 | local_irq_restore(flags); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1529 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1530 | disable_napi(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1531 | |
| 1532 | if (magic_packet) { |
| 1533 | /* Enable interrupt on Magic Packet */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1534 | gfar_write(&regs->imask, IMASK_MAG);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1535 | |
| 1536 | /* Enable Magic Packet mode */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1537 | tempval = gfar_read(&regs->maccfg2);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1538 | tempval |= MACCFG2_MPEN; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1539 | gfar_write(&regs->maccfg2, tempval);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1540 | } else { |
| 1541 | phy_stop(priv->phydev); |
| 1542 | } |
| 1543 | } |
| 1544 | |
| 1545 | return 0; |
| 1546 | } |
| 1547 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1548 | static int gfar_resume(struct device *dev) |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1549 | { |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1550 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 1551 | struct net_device *ndev = priv->ndev; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1552 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1553 | unsigned long flags; |
| 1554 | u32 tempval; |
| 1555 | int magic_packet = priv->wol_en && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1556 | (priv->device_flags & |
| 1557 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1558 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1559 | if (!netif_running(ndev)) { |
| 1560 | netif_device_attach(ndev); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1561 | return 0; |
| 1562 | } |
| 1563 | |
| 1564 | if (!magic_packet && priv->phydev) |
| 1565 | phy_start(priv->phydev); |
| 1566 | |
| 1567 | /* Disable Magic Packet mode, in case something |
| 1568 | * else woke us up. |
| 1569 | */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1570 | local_irq_save(flags); |
| 1571 | lock_tx_qs(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1572 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1573 | tempval = gfar_read(&regs->maccfg2);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1574 | tempval &= ~MACCFG2_MPEN; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1575 | gfar_write(&regs->maccfg2, tempval);
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1576 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1577 | gfar_start(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1578 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1579 | unlock_tx_qs(priv); |
| 1580 | local_irq_restore(flags); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1581 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1582 | netif_device_attach(ndev); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1583 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1584 | enable_napi(priv); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1585 | |
| 1586 | return 0; |
| 1587 | } |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1588 | |
| 1589 | static int gfar_restore(struct device *dev) |
| 1590 | { |
| 1591 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 1592 | struct net_device *ndev = priv->ndev; |
| 1593 | |
Wang Dongsheng | 103cdd1 | 2012-11-09 04:43:51 +0000 | [diff] [blame] | 1594 | if (!netif_running(ndev)) { |
| 1595 | netif_device_attach(ndev); |
| 1596 | |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1597 | return 0; |
Wang Dongsheng | 103cdd1 | 2012-11-09 04:43:51 +0000 | [diff] [blame] | 1598 | } |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1599 | |
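| | 	/* Coming back from hibernation with the interface up: rebuild the
| | 	 * BD rings and reprogram the MAC before restarting the DMA.
| | 	 */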
Claudiu Manoil | 1eb8f7a | 2012-11-08 22:11:41 +0000 | [diff] [blame] | 1600 | if (gfar_init_bds(ndev)) { |
| 1601 | free_skb_resources(priv); |
| 1602 | return -ENOMEM; |
| 1603 | } |
| 1604 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 1605 | gfar_mac_reset(priv); |
| 1606 | |
| 1607 | gfar_init_tx_rx_base(priv); |
| 1608 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1609 | gfar_start(priv); |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1610 | |
| 1611 | priv->oldlink = 0; |
| 1612 | priv->oldspeed = 0; |
| 1613 | priv->oldduplex = -1; |
| 1614 | |
| 1615 | if (priv->phydev) |
| 1616 | phy_start(priv->phydev); |
| 1617 | |
| 1618 | netif_device_attach(ndev); |
Anton Vorontsov | 5ea681d | 2009-11-10 14:11:05 +0000 | [diff] [blame] | 1619 | enable_napi(priv); |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1620 | |
| 1621 | return 0; |
| 1622 | } |
| 1623 | |
| 1624 | static struct dev_pm_ops gfar_pm_ops = { |
| 1625 | .suspend = gfar_suspend, |
| 1626 | .resume = gfar_resume, |
| 1627 | .freeze = gfar_suspend, |
| 1628 | .thaw = gfar_resume, |
| 1629 | .restore = gfar_restore, |
| 1630 | }; |
| 1631 | |
| 1632 | #define GFAR_PM_OPS (&gfar_pm_ops) |
| 1633 | |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1634 | #else |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1635 | |
| 1636 | #define GFAR_PM_OPS NULL |
Anton Vorontsov | be926fc | 2009-10-12 06:00:42 +0000 | [diff] [blame] | 1637 | |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1638 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1639 | |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1640 | /* Reads the controller's registers to determine what interface |
| 1641 | * connects it to the PHY. |
| 1642 | */ |
| 1643 | static phy_interface_t gfar_get_interface(struct net_device *dev) |
| 1644 | { |
| 1645 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1646 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1647 | u32 ecntrl; |
| 1648 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1649 | ecntrl = gfar_read(&regs->ecntrl);
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1650 | |
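| | 	/* ECNTRL reflects the configured MAC-PHY interface: check the SGMII
| | 	 * and (R)TBI modes first, then the reduced-pin (RMII/RGMII) modes.
| | 	 */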
| 1651 | if (ecntrl & ECNTRL_SGMII_MODE) |
| 1652 | return PHY_INTERFACE_MODE_SGMII; |
| 1653 | |
| 1654 | if (ecntrl & ECNTRL_TBI_MODE) { |
| 1655 | if (ecntrl & ECNTRL_REDUCED_MODE) |
| 1656 | return PHY_INTERFACE_MODE_RTBI; |
| 1657 | else |
| 1658 | return PHY_INTERFACE_MODE_TBI; |
| 1659 | } |
| 1660 | |
| 1661 | if (ecntrl & ECNTRL_REDUCED_MODE) { |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1662 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) { |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1663 | return PHY_INTERFACE_MODE_RMII; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1664 | } |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1665 | else { |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1666 | phy_interface_t interface = priv->interface; |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1667 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1668 | /* This isn't autodetected right now, so it must |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1669 | * be set by the device tree or platform code. |
| 1670 | */ |
| 1671 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) |
| 1672 | return PHY_INTERFACE_MODE_RGMII_ID; |
| 1673 | |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1674 | return PHY_INTERFACE_MODE_RGMII; |
Andy Fleming | 7132ab7 | 2007-07-11 11:43:07 -0500 | [diff] [blame] | 1675 | } |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1676 | } |
| 1677 | |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1678 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1679 | return PHY_INTERFACE_MODE_GMII; |
| 1680 | |
| 1681 | return PHY_INTERFACE_MODE_MII; |
| 1682 | } |
| 1683 | |
| 1684 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1685 | /* Initializes driver's PHY state, and attaches to the PHY. |
| 1686 | * Returns 0 on success. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | */ |
| 1688 | static int init_phy(struct net_device *dev) |
| 1689 | { |
| 1690 | struct gfar_private *priv = netdev_priv(dev); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1691 | uint gigabit_support = |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1692 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
Claudiu Manoil | 23402bd | 2013-08-12 13:53:26 +0300 | [diff] [blame] | 1693 | GFAR_SUPPORTED_GBIT : 0; |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1694 | phy_interface_t interface; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | |
| 1696 | priv->oldlink = 0; |
| 1697 | priv->oldspeed = 0; |
| 1698 | priv->oldduplex = -1; |
| 1699 | |
Andy Fleming | e8a2b6a | 2006-12-01 12:01:06 -0600 | [diff] [blame] | 1700 | interface = gfar_get_interface(dev); |
| 1701 | |
Anton Vorontsov | 1db780f | 2009-07-16 21:31:42 +0000 | [diff] [blame] | 1702 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
| 1703 | interface); |
Anton Vorontsov | 1db780f | 2009-07-16 21:31:42 +0000 | [diff] [blame] | 1704 | if (!priv->phydev) { |
| 1705 | dev_err(&dev->dev, "could not attach to PHY\n"); |
| 1706 | return -ENODEV; |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1707 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1708 | |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1709 | if (interface == PHY_INTERFACE_MODE_SGMII) |
| 1710 | gfar_configure_serdes(dev); |
| 1711 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1712 | /* Remove any features not supported by the controller */ |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1713 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
| 1714 | priv->phydev->advertising = priv->phydev->supported; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1715 | |
Pavaluca Matei-B46610 | cf987af | 2014-10-27 10:42:42 +0200 | [diff] [blame] | 1716 | /* Add support for flow control, but don't advertise it by default */ |
| 1717 | priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
| 1718 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | } |
| 1721 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1722 | /* Initialize TBI PHY interface for communicating with the |
Paul Gortmaker | d031358 | 2008-04-17 00:08:10 -0400 | [diff] [blame] | 1723 | * SERDES lynx PHY on the chip. We communicate with this PHY |
| 1724 | * through the MDIO bus on each controller, treating it as a |
| 1725 | * "normal" PHY at the address found in the TBIPA register. We assume |
| 1726 | * that the TBIPA register is valid. Either the MDIO bus code will set |
| 1727 | * it to a value that doesn't conflict with other PHYs on the bus, or the |
| 1728 | * value doesn't matter, as there are no other PHYs on the bus. |
| 1729 | */ |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1730 | static void gfar_configure_serdes(struct net_device *dev) |
| 1731 | { |
| 1732 | struct gfar_private *priv = netdev_priv(dev); |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1733 | struct phy_device *tbiphy; |
Trent Piepho | c132419 | 2008-10-30 18:17:06 -0700 | [diff] [blame] | 1734 | |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1735 | if (!priv->tbi_node) { |
| 1736 | dev_warn(&dev->dev, "error: SGMII mode requires that the " |
| 1737 | "device tree specify a tbi-handle\n"); |
| 1738 | return; |
| 1739 | } |
| 1740 | |
| 1741 | tbiphy = of_phy_find_device(priv->tbi_node); |
| 1742 | if (!tbiphy) { |
| 1743 | dev_err(&dev->dev, "error: Could not get TBI device\n"); |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1744 | return; |
| 1745 | } |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1746 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1747 | /* If the link is already up, we must already be ok, and don't need to |
Trent Piepho | bdb59f9 | 2008-10-30 18:17:07 -0700 | [diff] [blame] | 1748 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
| 1749 | * everything for us? Resetting it takes the link down and requires |
| 1750 | * several seconds for it to come back. |
| 1751 | */ |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1752 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 1753 | return; |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1754 | |
Paul Gortmaker | d031358 | 2008-04-17 00:08:10 -0400 | [diff] [blame] | 1755 | /* Single clk mode, mii mode off (for serdes communication) */
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1756 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1757 | |
Grant Likely | fe192a4 | 2009-04-25 12:53:12 +0000 | [diff] [blame] | 1758 | phy_write(tbiphy, MII_ADVERTISE, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1759 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
| 1760 | ADVERTISE_1000XPSE_ASYM); |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1761 | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1762 | phy_write(tbiphy, MII_BMCR, |
| 1763 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | |
| 1764 | BMCR_SPEED1000); |
Kapil Juneja | d3c1287 | 2007-05-11 18:25:11 -0500 | [diff] [blame] | 1765 | } |
| 1766 | |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1767 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
| 1768 | { |
| 1769 | u32 res; |
| 1770 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1771 | /* Normally TSEC should not hang on GRS commands, so we should
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1772 | * actually wait for IEVENT_GRSC flag. |
| 1773 | */ |
Claudiu Manoil | ad3660c | 2013-10-09 20:20:40 +0300 | [diff] [blame] | 1774 | if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1775 | return 0; |
| 1776 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1777 | /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1778 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
| 1779 | * and the Rx can be safely reset. |
| 1780 | */ |
| 1781 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); |
| 1782 | res &= 0x7f807f80; |
| 1783 | if ((res & 0xffff) == (res >> 16)) |
| 1784 | return 1; |
| 1785 | |
| 1786 | return 0; |
| 1787 | } |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1788 | |
| 1789 | /* Halt the receive and transmit queues */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1790 | static void gfar_halt_nodisable(struct gfar_private *priv) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | { |
Claudiu Manoil | efeddce | 2014-02-17 12:53:17 +0200 | [diff] [blame] | 1792 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 | u32 tempval; |
Claudiu Manoil | a4feee8 | 2014-10-07 10:44:34 +0300 | [diff] [blame] | 1794 | unsigned int timeout; |
| 1795 | int stopped; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1796 | |
Claudiu Manoil | efeddce | 2014-02-17 12:53:17 +0200 | [diff] [blame] | 1797 | gfar_ints_disable(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | |
Claudiu Manoil | a4feee8 | 2014-10-07 10:44:34 +0300 | [diff] [blame] | 1799 | if (gfar_is_dma_stopped(priv)) |
| 1800 | return; |
| 1801 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 | /* Stop the DMA, and wait for it to stop */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1803 | tempval = gfar_read(&regs->dmactrl);
Claudiu Manoil | a4feee8 | 2014-10-07 10:44:34 +0300 | [diff] [blame] | 1804 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
| 1805 | gfar_write(&regs->dmactrl, tempval);
Anton Vorontsov | 511d934 | 2010-06-30 06:39:15 +0000 | [diff] [blame] | 1806 | |
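| | 	/* Wait for the graceful stop to complete; keep retrying while
| | 	 * neither the DMA nor the Rx engine reports stopped and the
| | 	 * GFAR_ERRATA_A002 heuristic in __gfar_is_rx_idle() still sees
| | 	 * the Rx side as busy.
| | 	 */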
Claudiu Manoil | a4feee8 | 2014-10-07 10:44:34 +0300 | [diff] [blame] | 1807 | retry: |
| 1808 | timeout = 1000; |
| 1809 | while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { |
| 1810 | cpu_relax(); |
| 1811 | timeout--; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | } |
Claudiu Manoil | a4feee8 | 2014-10-07 10:44:34 +0300 | [diff] [blame] | 1813 | |
| 1814 | if (!timeout) |
| 1815 | stopped = gfar_is_dma_stopped(priv); |
| 1816 | |
| 1817 | if (!stopped && !gfar_is_rx_dma_stopped(priv) && |
| 1818 | !__gfar_is_rx_idle(priv)) |
| 1819 | goto retry; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1820 | } |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1821 | |
| 1822 | /* Halt the receive and transmit queues */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1823 | void gfar_halt(struct gfar_private *priv) |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1824 | { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1825 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 1826 | u32 tempval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1828 | /* Disable the Rx/Tx hw queues */
| 1829 | gfar_write(&regs->rqueue, 0);
| 1830 | gfar_write(&regs->tqueue, 0);
Scott Wood | 2a54adc | 2008-08-12 15:10:46 -0500 | [diff] [blame] | 1831 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1832 | mdelay(10); |
| 1833 | |
| 1834 | gfar_halt_nodisable(priv); |
| 1835 | |
| 1836 | /* Disable Rx/Tx DMA */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 | tempval = gfar_read(&regs->maccfg1);
| 1838 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); |
| 1839 | gfar_write(&regs->maccfg1, tempval);
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1840 | } |
| 1841 | |
| 1842 | void stop_gfar(struct net_device *dev) |
| 1843 | { |
| 1844 | struct gfar_private *priv = netdev_priv(dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1845 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1846 | netif_tx_stop_all_queues(dev); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 1847 | |
Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 1848 | smp_mb__before_atomic(); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1849 | set_bit(GFAR_DOWN, &priv->state); |
Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 1850 | smp_mb__after_atomic(); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1851 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1852 | disable_napi(priv); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1853 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1854 | /* disable ints and gracefully shut down Rx/Tx DMA */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1855 | gfar_halt(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 1857 | phy_stop(priv->phydev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1858 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 | free_skb_resources(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1860 | } |
| 1861 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1862 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1864 | struct txbd8 *txbdp; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1865 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1866 | int i, j; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1867 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1868 | txbdp = tx_queue->tx_bd_base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1869 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1870 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
| 1871 | if (!tx_queue->tx_skbuff[i]) |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1872 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1874 | dma_unmap_single(priv->dev, txbdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1875 | txbdp->length, DMA_TO_DEVICE); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1876 | txbdp->lstatus = 0; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1877 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1878 | j++) { |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 1879 | txbdp++; |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1880 | dma_unmap_page(priv->dev, txbdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1881 | txbdp->length, DMA_TO_DEVICE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | } |
Andy Fleming | ad5da7a | 2008-05-07 13:20:55 -0500 | [diff] [blame] | 1883 | txbdp++; |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1884 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
| 1885 | tx_queue->tx_skbuff[i] = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1886 | } |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1887 | kfree(tx_queue->tx_skbuff); |
Claudiu Manoil | 1eb8f7a | 2012-11-08 22:11:41 +0000 | [diff] [blame] | 1888 | tx_queue->tx_skbuff = NULL; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1889 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1890 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1891 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
| 1892 | { |
| 1893 | struct rxbd8 *rxbdp; |
| 1894 | struct gfar_private *priv = netdev_priv(rx_queue->dev); |
| 1895 | int i; |
| 1896 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1897 | rxbdp = rx_queue->rx_bd_base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1898 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1899 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
| 1900 | if (rx_queue->rx_skbuff[i]) { |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1901 | dma_unmap_single(priv->dev, rxbdp->bufPtr, |
| 1902 | priv->rx_buffer_size, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1903 | DMA_FROM_DEVICE); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1904 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
| 1905 | rx_queue->rx_skbuff[i] = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 | } |
Anton Vorontsov | e69edd2 | 2009-10-12 06:00:30 +0000 | [diff] [blame] | 1907 | rxbdp->lstatus = 0; |
| 1908 | rxbdp->bufPtr = 0; |
| 1909 | rxbdp++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1910 | } |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 1911 | kfree(rx_queue->rx_skbuff); |
Claudiu Manoil | 1eb8f7a | 2012-11-08 22:11:41 +0000 | [diff] [blame] | 1912 | rx_queue->rx_skbuff = NULL; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1913 | } |
Anton Vorontsov | e69edd2 | 2009-10-12 06:00:30 +0000 | [diff] [blame] | 1914 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1915 | /* If there are any tx skbs or rx skbs still around, free them. |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1916 | * Then free tx_skbuff and rx_skbuff |
| 1917 | */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1918 | static void free_skb_resources(struct gfar_private *priv) |
| 1919 | { |
| 1920 | struct gfar_priv_tx_q *tx_queue = NULL; |
| 1921 | struct gfar_priv_rx_q *rx_queue = NULL; |
| 1922 | int i; |
| 1923 | |
| 1924 | /* Go through all the buffer descriptors and free their data buffers */ |
| 1925 | for (i = 0; i < priv->num_tx_queues; i++) { |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 1926 | struct netdev_queue *txq; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1927 | |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1928 | tx_queue = priv->tx_queue[i]; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 1929 | txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1930 | if (tx_queue->tx_skbuff) |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1931 | free_skb_tx_queue(tx_queue); |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 1932 | netdev_tx_reset_queue(txq); |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1933 | } |
| 1934 | |
| 1935 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1936 | rx_queue = priv->rx_queue[i]; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1937 | if (rx_queue->rx_skbuff) |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 1938 | free_skb_rx_queue(rx_queue); |
| 1939 | } |
| 1940 | |
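	/* All Tx and Rx BD rings were carved out of a single coherent
	 * allocation anchored at the first Tx queue, so one free releases
	 * every ring.
	 */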
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 1941 | dma_free_coherent(priv->dev, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 1942 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
| 1943 | sizeof(struct rxbd8) * priv->total_rx_ring_size, |
| 1944 | priv->tx_queue[0]->tx_bd_base, |
| 1945 | priv->tx_queue[0]->tx_bd_dma_base); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1946 | } |
| 1947 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1948 | void gfar_start(struct gfar_private *priv) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1949 | { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1950 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1951 | u32 tempval; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1952 | int i = 0; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1953 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1954 | /* Enable Rx/Tx hw queues */ |
| 1955 | gfar_write(&regs->rqueue, priv->rqueue);
| 1956 | gfar_write(&regs->tqueue, priv->tqueue);
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1957 | |
| 1958 | /* Initialize DMACTRL to have WWR and WOP */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1959 | tempval = gfar_read(&regs->dmactrl);
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1960 | tempval |= DMACTRL_INIT_SETTINGS;
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1961 | gfar_write(&regs->dmactrl, tempval);
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1962 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1963 | /* Make sure we aren't stopped */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1964 | tempval = gfar_read(&regs->dmactrl);
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1965 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 1966 | gfar_write(&regs->dmactrl, tempval);
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1967 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1968 | for (i = 0; i < priv->num_grps; i++) { |
| 1969 | regs = priv->gfargrp[i].regs; |
| 1970 | /* Clear THLT/RHLT, so that the DMA starts polling now */ |
| 1971 | gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
| 1972 | gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1973 | } |
Dai Haruki | 12dea57 | 2008-12-16 15:30:20 -0800 | [diff] [blame] | 1974 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1975 | /* Enable Rx/Tx DMA */ |
| 1976 | tempval = gfar_read(&regs->maccfg1);
| 1977 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
| 1978 | gfar_write(&regs->maccfg1, tempval);
| 1979 | |
Claudiu Manoil | efeddce | 2014-02-17 12:53:17 +0200 | [diff] [blame] | 1980 | gfar_ints_enable(priv); |
| 1981 | |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 1982 | priv->ndev->trans_start = jiffies; /* prevent tx timeout */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 1983 | } |
| 1984 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 1985 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
| 1986 | { |
| 1987 | free_irq(gfar_irq(grp, TX)->irq, grp); |
| 1988 | free_irq(gfar_irq(grp, RX)->irq, grp); |
| 1989 | free_irq(gfar_irq(grp, ER)->irq, grp); |
| 1990 | } |
| 1991 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 1992 | static int register_grp_irqs(struct gfar_priv_grp *grp) |
| 1993 | { |
| 1994 | struct gfar_private *priv = grp->priv; |
| 1995 | struct net_device *dev = priv->ndev; |
Anton Vorontsov | ccc05c6 | 2009-10-12 06:00:26 +0000 | [diff] [blame] | 1996 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1998 | /* If the device has multiple interrupts, register for |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 1999 | * them. Otherwise, only register for the one |
| 2000 | */ |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 2001 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2002 | /* Install our interrupt handlers for Error, |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2003 | * Transmit, and Receive |
| 2004 | */ |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2005 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
| 2006 | gfar_irq(grp, ER)->name, grp); |
| 2007 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2008 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2009 | gfar_irq(grp, ER)->irq); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2010 | |
Julia Lawall | 2145f1a | 2010-08-05 10:26:20 +0000 | [diff] [blame] | 2011 | goto err_irq_fail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2012 | } |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2013 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
| 2014 | gfar_irq(grp, TX)->name, grp); |
| 2015 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2016 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2017 | gfar_irq(grp, TX)->irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | goto tx_irq_fail; |
| 2019 | } |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2020 | err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
| 2021 | gfar_irq(grp, RX)->name, grp); |
| 2022 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2023 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2024 | gfar_irq(grp, RX)->irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | goto rx_irq_fail; |
| 2026 | } |
| 2027 | } else { |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2028 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
| 2029 | gfar_irq(grp, TX)->name, grp); |
| 2030 | if (err < 0) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2031 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2032 | gfar_irq(grp, TX)->irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2033 | goto err_irq_fail; |
| 2034 | } |
| 2035 | } |
| 2036 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2037 | return 0; |
| 2038 | |
| 2039 | rx_irq_fail: |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2040 | free_irq(gfar_irq(grp, TX)->irq, grp); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2041 | tx_irq_fail: |
Claudiu Manoil | ee873fd | 2013-01-29 03:55:12 +0000 | [diff] [blame] | 2042 | free_irq(gfar_irq(grp, ER)->irq, grp); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2043 | err_irq_fail: |
| 2044 | return err; |
| 2045 | |
| 2046 | } |
| 2047 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 2048 | static void gfar_free_irq(struct gfar_private *priv) |
| 2049 | { |
| 2050 | int i; |
| 2051 | |
| 2052 | /* Free the IRQs */ |
| 2053 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
| 2054 | for (i = 0; i < priv->num_grps; i++) |
| 2055 | free_grp_irqs(&priv->gfargrp[i]); |
| 2056 | } else { |
| 2057 | for (i = 0; i < priv->num_grps; i++) |
| 2058 | free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, |
| 2059 | &priv->gfargrp[i]); |
| 2060 | } |
| 2061 | } |
| 2062 | |
| 2063 | static int gfar_request_irq(struct gfar_private *priv) |
| 2064 | { |
| 2065 | int err, i, j; |
| 2066 | |
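	/* Register the IRQs of each interrupt group; on failure, release
	 * the IRQs of the groups already registered before returning.
	 */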
| 2067 | for (i = 0; i < priv->num_grps; i++) { |
| 2068 | err = register_grp_irqs(&priv->gfargrp[i]); |
| 2069 | if (err) { |
| 2070 | for (j = 0; j < i; j++) |
| 2071 | free_grp_irqs(&priv->gfargrp[j]); |
| 2072 | return err; |
| 2073 | } |
| 2074 | } |
| 2075 | |
| 2076 | return 0; |
| 2077 | } |
| 2078 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2079 | /* Bring the controller up and running */ |
| 2080 | int startup_gfar(struct net_device *ndev) |
| 2081 | { |
| 2082 | struct gfar_private *priv = netdev_priv(ndev); |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 2083 | int err; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2084 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 2085 | gfar_mac_reset(priv); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2086 | |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2087 | err = gfar_alloc_skb_resources(ndev); |
| 2088 | if (err) |
| 2089 | return err; |
| 2090 | |
Claudiu Manoil | a328ac9 | 2014-02-24 12:13:42 +0200 | [diff] [blame] | 2091 | gfar_init_tx_rx_base(priv); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2092 | |
Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 2093 | smp_mb__before_atomic(); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2094 | clear_bit(GFAR_DOWN, &priv->state); |
Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 2095 | smp_mb__after_atomic(); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2096 | |
| 2097 | /* Start Rx/Tx DMA and enable the interrupts */ |
Claudiu Manoil | c10650b | 2014-02-17 12:53:18 +0200 | [diff] [blame] | 2098 | gfar_start(priv); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 | |
Anton Vorontsov | 826aa4a | 2009-10-12 06:00:34 +0000 | [diff] [blame] | 2100 | phy_start(priv->phydev); |
| 2101 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2102 | enable_napi(priv); |
| 2103 | |
| 2104 | netif_tx_wake_all_queues(ndev); |
| 2105 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2106 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2107 | } |
| 2108 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2109 | /* Called when something needs to use the ethernet device |
| 2110 | * Returns 0 for success. |
| 2111 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2112 | static int gfar_enet_open(struct net_device *dev) |
| 2113 | { |
Li Yang | 94e8cc3 | 2007-10-12 21:53:51 +0800 | [diff] [blame] | 2114 | struct gfar_private *priv = netdev_priv(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2115 | int err; |
| 2116 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2117 | err = init_phy(dev); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2118 | if (err) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2119 | return err; |
| 2120 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 2121 | err = gfar_request_irq(priv); |
| 2122 | if (err) |
| 2123 | return err; |
| 2124 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2125 | err = startup_gfar(dev); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2126 | if (err) |
Anton Vorontsov | db0e8e3 | 2007-10-17 23:57:46 +0400 | [diff] [blame] | 2127 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2128 | |
Anton Vorontsov | 2884e5c | 2009-02-01 00:52:34 -0800 | [diff] [blame] | 2129 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
| 2130 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2131 | return err; |
| 2132 | } |
| 2133 | |
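/* Make room for a Tx Frame Control Block at the head of the frame and
 * zero it out; callers fill in the offload fields they need.
 */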
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2134 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2135 | { |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2136 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
Kumar Gala | 6c31d55 | 2009-04-28 08:04:10 -0700 | [diff] [blame] | 2137 | |
| 2138 | memset(fcb, 0, GMAC_FCB_LEN); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2139 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2140 | return fcb; |
| 2141 | } |
| 2142 | |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2143 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2144 | int fcb_length) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2145 | { |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2146 | /* If we're here, it's an IP packet with a TCP or UDP
| 2147 | * payload. We set it to checksum, using a pseudo-header |
| 2148 | * we provide |
| 2149 | */ |
Jan Ceuleers | 3a2e16c | 2012-06-05 03:42:14 +0000 | [diff] [blame] | 2150 | u8 flags = TXFCB_DEFAULT; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2151 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2152 | /* Tell the controller what the protocol is |
| 2153 | * And provide the already calculated phcs |
| 2154 | */ |
Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 2155 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2156 | flags |= TXFCB_UDP; |
Arnaldo Carvalho de Melo | 4bedb45 | 2007-03-13 14:28:48 -0300 | [diff] [blame] | 2157 | fcb->phcs = udp_hdr(skb)->check; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2158 | } else |
Kumar Gala | 8da32de | 2007-06-29 00:12:04 -0500 | [diff] [blame] | 2159 | fcb->phcs = tcp_hdr(skb)->check; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2160 | |
| 2161 | /* l3os is the distance between the start of the |
| 2162 | * frame (skb->data) and the start of the IP hdr. |
| 2163 | * l4os is the distance between the start of the |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2164 | * l3 hdr and the l4 hdr |
| 2165 | */ |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2166 | fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); |
Arnaldo Carvalho de Melo | cfe1fc7 | 2007-03-16 17:26:39 -0300 | [diff] [blame] | 2167 | fcb->l4os = skb_network_header_len(skb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2168 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2169 | fcb->flags = flags; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2170 | } |
| 2171 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2172 | inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2173 | { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2174 | fcb->flags |= TXFCB_VLN; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2175 | fcb->vlctl = vlan_tx_tag_get(skb); |
| 2176 | } |
| 2177 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2178 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2179 | struct txbd8 *base, int ring_size) |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2180 | { |
| 2181 | struct txbd8 *new_bd = bdp + stride; |
| 2182 | |
| 2183 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; |
| 2184 | } |
| 2185 | |
| 2186 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2187 | int ring_size) |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2188 | { |
| 2189 | return skip_txbd(bdp, 1, base, ring_size); |
| 2190 | } |
| 2191 | |
Claudiu Manoil | 02d88fb | 2013-08-05 17:20:09 +0300 | [diff] [blame] | 2192 | /* eTSEC12: csum generation not supported for some fcb offsets */ |
| 2193 | static inline bool gfar_csum_errata_12(struct gfar_private *priv, |
| 2194 | unsigned long fcb_addr) |
| 2195 | { |
| 2196 | return (gfar_has_errata(priv, GFAR_ERRATA_12) && |
| 2197 | (fcb_addr % 0x20) > 0x18); |
| 2198 | } |
| 2199 | |
| 2200 | /* eTSEC76: csum generation for frames larger than 2500 may |
| 2201 | * cause excess delays before start of transmission |
| 2202 | */ |
| 2203 | static inline bool gfar_csum_errata_76(struct gfar_private *priv, |
| 2204 | unsigned int len) |
| 2205 | { |
| 2206 | return (gfar_has_errata(priv, GFAR_ERRATA_76) && |
| 2207 | (len > 2500)); |
| 2208 | } |
| 2209 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2210 | /* This is called by the kernel when a frame is ready for transmission. |
| 2211 | * It is pointed to by the dev->hard_start_xmit function pointer |
| 2212 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2213 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 2214 | { |
| 2215 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2216 | struct gfar_priv_tx_q *tx_queue = NULL; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2217 | struct netdev_queue *txq; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 2218 | struct gfar __iomem *regs = NULL; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2219 | struct txfcb *fcb = NULL; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2220 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
Dai Haruki | 5a5efed | 2008-12-16 15:34:50 -0800 | [diff] [blame] | 2221 | u32 lstatus; |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2222 | int i, rq = 0; |
| 2223 | int do_tstamp, do_csum, do_vlan; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2224 | u32 bufaddr; |
Andy Fleming | fef6108 | 2006-04-20 16:44:29 -0500 | [diff] [blame] | 2225 | unsigned long flags; |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2226 | unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2227 | |
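	/* Select the Tx BD ring and matching netdev queue from the skb's
	 * queue mapping chosen by the stack.
	 */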
| 2228 | rq = skb->queue_mapping; |
| 2229 | tx_queue = priv->tx_queue[rq]; |
| 2230 | txq = netdev_get_tx_queue(dev, rq); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2231 | base = tx_queue->tx_bd_base; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2232 | regs = tx_queue->grp->regs; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2233 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2234 | do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); |
| 2235 | do_vlan = vlan_tx_tag_present(skb); |
| 2236 | do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
| 2237 | priv->hwts_tx_en; |
| 2238 | |
| 2239 | if (do_csum || do_vlan) |
| 2240 | fcb_len = GMAC_FCB_LEN; |
| 2241 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2242 | /* check if time stamp should be generated */ |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2243 | if (unlikely(do_tstamp)) |
| 2244 | fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2245 | |
Li Yang | 5b28bea | 2009-03-27 15:54:30 -0700 | [diff] [blame] | 2246 | /* make space for additional header when fcb is needed */ |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2247 | if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2248 | struct sk_buff *skb_new; |
| 2249 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2250 | skb_new = skb_realloc_headroom(skb, fcb_len); |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2251 | if (!skb_new) { |
| 2252 | dev->stats.tx_errors++; |
Eric W. Biederman | c9974ad | 2014-03-11 14:20:26 -0700 | [diff] [blame] | 2253 | dev_kfree_skb_any(skb); |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2254 | return NETDEV_TX_OK; |
| 2255 | } |
Manfred Rudigier | db83d13 | 2012-01-09 23:26:50 +0000 | [diff] [blame] | 2256 | |
Eric Dumazet | 313b037 | 2012-07-05 11:45:13 +0000 | [diff] [blame] | 2257 | if (skb->sk) |
| 2258 | skb_set_owner_w(skb_new, skb->sk); |
Eric W. Biederman | c9974ad | 2014-03-11 14:20:26 -0700 | [diff] [blame] | 2259 | dev_consume_skb_any(skb); |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2260 | skb = skb_new; |
| 2261 | } |
| 2262 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2263 | /* total number of fragments in the SKB */ |
| 2264 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 2265 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2266 | /* calculate the required number of TxBDs for this skb */ |
| 2267 | if (unlikely(do_tstamp)) |
| 2268 | nr_txbds = nr_frags + 2; |
| 2269 | else |
| 2270 | nr_txbds = nr_frags + 1; |
| 2271 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2272 | /* check if there is space to queue this packet */ |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2273 | if (nr_txbds > tx_queue->num_txbdfree) { |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2274 | /* no space, stop the queue */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2275 | netif_tx_stop_queue(txq); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2276 | dev->stats.tx_fifo_errors++; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2277 | return NETDEV_TX_BUSY; |
| 2278 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2279 | |
| 2280 | /* Update transmit stats */ |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2281 | bytes_sent = skb->len; |
| 2282 | tx_queue->stats.tx_bytes += bytes_sent; |
| 2283 | /* keep Tx bytes on wire for BQL accounting */ |
| 2284 | GFAR_CB(skb)->bytes_sent = bytes_sent; |
Eric Dumazet | 1ac9ad1 | 2011-01-12 12:13:14 +0000 | [diff] [blame] | 2285 | tx_queue->stats.tx_packets++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2286 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2287 | txbdp = txbdp_start = tx_queue->cur_tx; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2288 | lstatus = txbdp->lstatus; |
| 2289 | |
| 2290 | /* Time stamp insertion requires one additional TxBD */ |
| 2291 | if (unlikely(do_tstamp)) |
| 2292 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2293 | tx_queue->tx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2294 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2295 | if (nr_frags == 0) { |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2296 | if (unlikely(do_tstamp)) |
| 2297 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2298 | TXBD_INTERRUPT); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2299 | else |
| 2300 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2301 | } else { |
| 2302 | /* Place the fragment addresses and lengths into the TxBDs */ |
| 2303 | for (i = 0; i < nr_frags; i++) { |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2304 | unsigned int frag_len; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2305 | /* Point at the next BD, wrapping as needed */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2306 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2307 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2308 | frag_len = skb_shinfo(skb)->frags[i].size; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2309 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2310 | lstatus = txbdp->lstatus | frag_len | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2311 | BD_LFLAG(TXBD_READY); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2312 | |
| 2313 | /* Handle the last BD specially */ |
| 2314 | if (i == nr_frags - 1) |
| 2315 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
| 2316 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2317 | bufaddr = skb_frag_dma_map(priv->dev, |
Ian Campbell | 2234a72 | 2011-08-29 23:18:29 +0000 | [diff] [blame] | 2318 | &skb_shinfo(skb)->frags[i], |
| 2319 | 0, |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2320 | frag_len, |
Ian Campbell | 2234a72 | 2011-08-29 23:18:29 +0000 | [diff] [blame] | 2321 | DMA_TO_DEVICE); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2322 | |
| 2323 | /* set the TxBD length and buffer pointer */ |
| 2324 | txbdp->bufPtr = bufaddr; |
| 2325 | txbdp->lstatus = lstatus; |
| 2326 | } |
| 2327 | |
| 2328 | lstatus = txbdp_start->lstatus; |
| 2329 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2330 | |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2331 | /* Add TxPAL between FCB and frame if required */ |
| 2332 | if (unlikely(do_tstamp)) { |
| 2333 | skb_push(skb, GMAC_TXPAL_LEN); |
| 2334 | memset(skb->data, 0, GMAC_TXPAL_LEN); |
| 2335 | } |
| 2336 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2337 | /* Add TxFCB if required */ |
| 2338 | if (fcb_len) { |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2339 | fcb = gfar_add_fcb(skb); |
Claudiu Manoil | 02d88fb | 2013-08-05 17:20:09 +0300 | [diff] [blame] | 2340 | lstatus |= BD_LFLAG(TXBD_TOE); |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2341 | } |
| 2342 | |
| 2343 | /* Set up checksumming */ |
| 2344 | if (do_csum) { |
| 2345 | gfar_tx_checksum(skb, fcb, fcb_len); |
Claudiu Manoil | 02d88fb | 2013-08-05 17:20:09 +0300 | [diff] [blame] | 2346 | |
| 2347 | if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || |
| 2348 | unlikely(gfar_csum_errata_76(priv, skb->len))) { |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 2349 | __skb_pull(skb, GMAC_FCB_LEN); |
| 2350 | skb_checksum_help(skb); |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2351 | if (do_vlan || do_tstamp) { |
| 2352 | /* put back a new fcb for vlan/tstamp TOE */ |
| 2353 | fcb = gfar_add_fcb(skb); |
| 2354 | } else { |
| 2355 | /* Tx TOE not used */ |
| 2356 | lstatus &= ~(BD_LFLAG(TXBD_TOE)); |
| 2357 | fcb = NULL; |
| 2358 | } |
Alex Dubov | 4363c2fdd | 2011-03-16 17:57:13 +0000 | [diff] [blame] | 2359 | } |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2360 | } |
| 2361 | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2362 | if (do_vlan) |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2363 | gfar_tx_vlan(skb, fcb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2364 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2365 | /* Setup tx hardware time stamping if requested */ |
| 2366 | if (unlikely(do_tstamp)) { |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2367 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2368 | fcb->ptp = 1; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2369 | } |
| 2370 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2371 | txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2372 | skb_headlen(skb), DMA_TO_DEVICE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2373 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2374 | /* If time stamping is requested, one additional TxBD must be set up. The
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2375 | * first TxBD points to the FCB and must have a data length of |
| 2376 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with |
| 2377 | * the full frame length. |
| 2378 | */ |
| 2379 | if (unlikely(do_tstamp)) { |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2380 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2381 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | |
Claudiu Manoil | 0d0cffd | 2013-08-05 17:20:10 +0300 | [diff] [blame] | 2382 | (skb_headlen(skb) - fcb_len); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2383 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
| 2384 | } else { |
| 2385 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
| 2386 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2387 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2388 | netdev_tx_sent_queue(txq, bytes_sent); |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2389 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2390 | /* We can work in parallel with gfar_clean_tx_ring(), except |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2391 | * when modifying num_txbdfree. Note that we didn't grab the lock |
| 2392 | * when we were reading the num_txbdfree and checking for available |
| 2393 | * space, that's because outside of this function it can only grow, |
| 2394 | * and once we've got needed space, it cannot suddenly disappear. |
| 2395 | * |
| 2396 | * The lock also protects us from gfar_error(), which can modify |
| 2397 | * regs->tstat and thus retrigger the transfers, which is why we |
| 2398 | * also must grab the lock before setting ready bit for the first |
| 2399 | * to be transmitted BD. |
| 2400 | */ |
| 2401 | spin_lock_irqsave(&tx_queue->txlock, flags); |
| 2402 | |
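	/* Ensure all descriptor and FCB writes above are visible to the
	 * controller before the first BD's lstatus (READY bit) is written.
	 */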
Claudiu Manoil | d55398b | 2014-10-07 10:44:35 +0300 | [diff] [blame] | 2403 | gfar_wmb(); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2404 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2405 | txbdp_start->lstatus = lstatus; |
| 2406 | |
Claudiu Manoil | d55398b | 2014-10-07 10:44:35 +0300 | [diff] [blame] | 2407 | gfar_wmb(); /* force lstatus write before tx_skbuff */ |
Anton Vorontsov | 0eddba5 | 2010-03-03 08:18:58 +0000 | [diff] [blame] | 2408 | |
| 2409 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
| 2410 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2411 | /* Update the current skb pointer to the next entry we will use |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2412 | * (wrapping if necessary) |
| 2413 | */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2414 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2415 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2416 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2417 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2418 | |
| 2419 | /* reduce TxBD free count */ |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2420 | tx_queue->num_txbdfree -= (nr_txbds); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2421 | |
| 2422 | /* If the next BD still needs to be cleaned up, then the bds |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2423 | * are full. We need to tell the kernel to stop sending us stuff. |
| 2424 | */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2425 | if (!tx_queue->num_txbdfree) { |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2426 | netif_tx_stop_queue(txq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2427 | |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 2428 | dev->stats.tx_fifo_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2429 | } |
| 2430 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2431 | /* Tell the DMA to go go go */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2432 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2433 | |
| 2434 | /* Unlock priv */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2435 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2436 | |
Stephen Hemminger | 54dc79f | 2009-03-27 00:38:45 -0700 | [diff] [blame] | 2437 | return NETDEV_TX_OK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2438 | } |
| 2439 | |
| 2440 | /* Stops the kernel queue, and halts the controller */ |
| 2441 | static int gfar_close(struct net_device *dev) |
| 2442 | { |
| 2443 | struct gfar_private *priv = netdev_priv(dev); |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2444 | |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2445 | cancel_work_sync(&priv->reset_task); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2446 | stop_gfar(dev); |
| 2447 | |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 2448 | /* Disconnect from the PHY */ |
| 2449 | phy_disconnect(priv->phydev); |
| 2450 | priv->phydev = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2451 | |
Claudiu Manoil | 80ec396 | 2014-02-24 12:13:44 +0200 | [diff] [blame] | 2452 | gfar_free_irq(priv); |
| 2453 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2454 | return 0; |
| 2455 | } |
| 2456 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2457 | /* Changes the mac address if the controller is not running. */ |
Andy Fleming | f162b9d | 2008-05-02 13:00:30 -0500 | [diff] [blame] | 2458 | static int gfar_set_mac_address(struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2459 | { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2460 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2461 | |
| 2462 | return 0; |
| 2463 | } |
| 2464 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2465 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
| 2466 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2467 | struct gfar_private *priv = netdev_priv(dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2468 | int frame_size = new_mtu + ETH_HLEN; |
| 2469 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2470 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2471 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2472 | return -EINVAL; |
| 2473 | } |
| 2474 | |
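	/* Serialize with other reset/reconfigure paths: wait for any reset
	 * in flight to finish, then hold GFAR_RESETTING across the change.
	 */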
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2475 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
| 2476 | cpu_relax(); |
| 2477 | |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 2478 | if (dev->flags & IFF_UP) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2479 | stop_gfar(dev); |
| 2480 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2481 | dev->mtu = new_mtu; |
| 2482 | |
Claudiu Manoil | 8830264 | 2014-02-24 12:13:43 +0200 | [diff] [blame] | 2483 | if (dev->flags & IFF_UP) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2484 | startup_gfar(dev); |
| 2485 | |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2486 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
| 2487 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 | return 0; |
| 2489 | } |
| 2490 | |
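/* Tear the interface down and bring it back up, serialized by the
 * GFAR_RESETTING bit.
 */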
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2491 | void reset_gfar(struct net_device *ndev) |
| 2492 | { |
| 2493 | struct gfar_private *priv = netdev_priv(ndev); |
| 2494 | |
| 2495 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
| 2496 | cpu_relax(); |
| 2497 | |
| 2498 | stop_gfar(ndev); |
| 2499 | startup_gfar(ndev); |
| 2500 | |
| 2501 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
| 2502 | } |
| 2503 | |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2504 | /* gfar_reset_task gets scheduled when a packet has not been |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2505 | * transmitted after a set amount of time. |
| 2506 | * For now, assume that clearing out all the structures, and |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2507 | * starting over will fix the problem. |
| 2508 | */ |
| 2509 | static void gfar_reset_task(struct work_struct *work) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2510 | { |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2511 | struct gfar_private *priv = container_of(work, struct gfar_private, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2512 | reset_task); |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2513 | reset_gfar(priv->ndev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2514 | } |
| 2515 | |
Sebastian Siewior | ab93990 | 2008-08-19 21:12:45 +0200 | [diff] [blame] | 2516 | static void gfar_timeout(struct net_device *dev) |
| 2517 | { |
| 2518 | struct gfar_private *priv = netdev_priv(dev); |
| 2519 | |
| 2520 | dev->stats.tx_errors++; |
| 2521 | schedule_work(&priv->reset_task); |
| 2522 | } |
| 2523 | |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2524 | static void gfar_align_skb(struct sk_buff *skb) |
| 2525 | { |
| 2526 | /* The Rx data buffer must be properly aligned; reserve as many
| 2527 | * bytes as needed to bring skb->data to RXBUF_ALIGNMENT
| 2528 | */
| 2529 | skb_reserve(skb, RXBUF_ALIGNMENT - |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2530 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2531 | } |
| 2532 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2533 | /* Interrupt Handler for Transmit complete */ |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2534 | static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2535 | { |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2536 | struct net_device *dev = tx_queue->dev; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2537 | struct netdev_queue *txq; |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2538 | struct gfar_private *priv = netdev_priv(dev); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2539 | struct txbd8 *bdp, *next = NULL; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2540 | struct txbd8 *lbdp = NULL; |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2541 | struct txbd8 *base = tx_queue->tx_bd_base; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2542 | struct sk_buff *skb; |
| 2543 | int skb_dirtytx; |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2544 | int tx_ring_size = tx_queue->tx_ring_size; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2545 | int frags = 0, nr_txbds = 0; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2546 | int i; |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2547 | int howmany = 0; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2548 | int tqi = tx_queue->qindex; |
| 2549 | unsigned int bytes_sent = 0; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2550 | u32 lstatus; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2551 | size_t buflen; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2552 | |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2553 | txq = netdev_get_tx_queue(dev, tqi); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2554 | bdp = tx_queue->dirty_tx; |
| 2555 | skb_dirtytx = tx_queue->skb_dirtytx; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2556 | |
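	/* Walk the ring from dirty_tx, reclaiming only frames whose last
	 * descriptor the controller has finished with; unmap the buffers
	 * and hand the descriptors back to the free pool.
	 */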
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2557 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2558 | unsigned long flags; |
| 2559 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2560 | frags = skb_shinfo(skb)->nr_frags; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2561 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2562 | /* When time stamping, one additional TxBD must be freed. |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2563 | * Also, we need to dma_unmap_single() the TxPAL. |
| 2564 | */ |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2565 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2566 | nr_txbds = frags + 2; |
| 2567 | else |
| 2568 | nr_txbds = frags + 1; |
| 2569 | |
| 2570 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2571 | |
| 2572 | lstatus = lbdp->lstatus; |
| 2573 | |
| 2574 | /* Only clean completed frames */ |
| 2575 | if ((lstatus & BD_LFLAG(TXBD_READY)) && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2576 | (lstatus & BD_LENGTH_MASK)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2577 | break; |
| 2578 | |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2579 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2580 | next = next_txbd(bdp, base, tx_ring_size); |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2581 | buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2582 | } else |
| 2583 | buflen = bdp->length; |
| 2584 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2585 | dma_unmap_single(priv->dev, bdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2586 | buflen, DMA_TO_DEVICE); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2587 | |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2588 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2589 | struct skb_shared_hwtstamps shhwtstamps; |
| 2590 | u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2591 | |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2592 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
| 2593 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); |
Manfred Rudigier | 9c4886e | 2012-01-09 23:26:51 +0000 | [diff] [blame] | 2594 | skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2595 | skb_tstamp_tx(skb, &shhwtstamps); |
| 2596 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2597 | bdp = next; |
| 2598 | } |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2599 | |
| 2600 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2601 | bdp = next_txbd(bdp, base, tx_ring_size); |
| 2602 | |
| 2603 | for (i = 0; i < frags; i++) { |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2604 | dma_unmap_page(priv->dev, bdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2605 | bdp->length, DMA_TO_DEVICE); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2606 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2607 | bdp = next_txbd(bdp, base, tx_ring_size); |
| 2608 | } |
| 2609 | |
Claudiu Manoil | 50ad076 | 2013-08-30 15:01:15 +0300 | [diff] [blame] | 2610 | bytes_sent += GFAR_CB(skb)->bytes_sent; |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2611 | |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2612 | dev_kfree_skb_any(skb); |
Andy Fleming | 0fd56bb | 2009-02-04 16:43:16 -0800 | [diff] [blame] | 2613 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2614 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2615 | |
| 2616 | skb_dirtytx = (skb_dirtytx + 1) & |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2617 | TX_RING_MOD_MASK(tx_ring_size); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2618 | |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2619 | howmany++; |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2620 | spin_lock_irqsave(&tx_queue->txlock, flags); |
Manfred Rudigier | f0ee7ac | 2010-04-08 23:10:35 +0000 | [diff] [blame] | 2621 | tx_queue->num_txbdfree += nr_txbds; |
Anton Vorontsov | a3bc1f1 | 2009-11-10 14:11:10 +0000 | [diff] [blame] | 2622 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2623 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2624 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2625 | /* If we freed a buffer, we can restart transmission, if necessary */ |
Claudiu Manoil | 0851133 | 2014-02-24 12:13:45 +0200 | [diff] [blame] | 2626 | if (tx_queue->num_txbdfree && |
| 2627 | netif_tx_queue_stopped(txq) && |
| 2628 | !(test_bit(GFAR_DOWN, &priv->state))) |
| 2629 | netif_wake_subqueue(priv->ndev, tqi); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2630 | |
Dai Haruki | 4669bc9 | 2008-12-17 16:51:04 -0800 | [diff] [blame] | 2631 | /* Update dirty indicators */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2632 | tx_queue->skb_dirtytx = skb_dirtytx; |
| 2633 | tx_queue->dirty_tx = bdp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2634 | |
Paul Gortmaker | d8a0f1b | 2012-01-06 13:51:03 -0500 | [diff] [blame] | 2635 | netdev_tx_completed_queue(txq, howmany, bytes_sent); |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2636 | } |
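The cleanup path above advances skb_dirtytx with a power-of-two mask instead of a modulo. A minimal user-space sketch of that wrap-around, assuming TX_RING_MOD_MASK(size) is simply (size - 1) and using a made-up ring size:

#include <stdio.h>

/* Toy model of the dirty-index wrap in gfar_clean_tx_ring(): with a
 * power-of-two ring size, (index + 1) & (size - 1) walks the ring and
 * wraps back to 0 without a branch.
 */
int main(void)
{
	unsigned int size = 8, mask = size - 1;
	unsigned int skb_dirtytx = 5;
	int i;

	for (i = 0; i < 6; i++) {
		printf("%u ", skb_dirtytx);
		skb_dirtytx = (skb_dirtytx + 1) & mask;
	}
	printf("\n");	/* prints: 5 6 7 0 1 2 */
	return 0;
}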
| 2637 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2638 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2639 | struct sk_buff *skb) |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2640 | { |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2641 | struct net_device *dev = rx_queue->dev; |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2642 | struct gfar_private *priv = netdev_priv(dev); |
Anton Vorontsov | 8a102fe | 2009-10-12 06:00:37 +0000 | [diff] [blame] | 2643 | dma_addr_t buf; |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2644 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2645 | buf = dma_map_single(priv->dev, skb->data, |
Anton Vorontsov | 8a102fe | 2009-10-12 06:00:37 +0000 | [diff] [blame] | 2646 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2647 | gfar_init_rxbdp(rx_queue, bdp, buf); |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2648 | } |
| 2649 | |
Jan Ceuleers | 2281a0f | 2012-06-05 03:42:11 +0000 | [diff] [blame] | 2650 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2651 | { |
| 2652 | struct gfar_private *priv = netdev_priv(dev); |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2653 | struct sk_buff *skb; |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2654 | |
| 2655 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
| 2656 | if (!skb) |
| 2657 | return NULL; |
| 2658 | |
| 2659 | gfar_align_skb(skb); |
| 2660 | |
| 2661 | return skb; |
| 2662 | } |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2663 | |
Jan Ceuleers | 2281a0f | 2012-06-05 03:42:11 +0000 | [diff] [blame] | 2664 | struct sk_buff *gfar_new_skb(struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2665 | { |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2666 | return gfar_alloc_skb(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2667 | } |
| 2668 | |
Li Yang | 298e1a9 | 2007-10-16 14:18:13 +0800 | [diff] [blame] | 2669 | static inline void count_errors(unsigned short status, struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2670 | { |
Li Yang | 298e1a9 | 2007-10-16 14:18:13 +0800 | [diff] [blame] | 2671 | struct gfar_private *priv = netdev_priv(dev); |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 2672 | struct net_device_stats *stats = &dev->stats; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2673 | struct gfar_extra_stats *estats = &priv->extra_stats; |
| 2674 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2675 | /* If the packet was truncated, none of the other errors matter */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2676 | if (status & RXBD_TRUNCATED) { |
| 2677 | stats->rx_length_errors++; |
| 2678 | |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2679 | atomic64_inc(&estats->rx_trunc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2680 | |
| 2681 | return; |
| 2682 | } |
| 2683 | /* Count the errors, if there were any */ |
| 2684 | if (status & (RXBD_LARGE | RXBD_SHORT)) { |
| 2685 | stats->rx_length_errors++; |
| 2686 | |
| 2687 | if (status & RXBD_LARGE) |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2688 | atomic64_inc(&estats->rx_large); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2689 | else |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2690 | atomic64_inc(&estats->rx_short); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2691 | } |
| 2692 | if (status & RXBD_NONOCTET) { |
| 2693 | stats->rx_frame_errors++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2694 | atomic64_inc(&estats->rx_nonoctet); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2695 | } |
| 2696 | if (status & RXBD_CRCERR) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2697 | atomic64_inc(&estats->rx_crcerr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2698 | stats->rx_crc_errors++; |
| 2699 | } |
| 2700 | if (status & RXBD_OVERRUN) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2701 | atomic64_inc(&estats->rx_overrun); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2702 | stats->rx_over_errors++;
| 2703 | } |
| 2704 | } |
| 2705 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 2706 | irqreturn_t gfar_receive(int irq, void *grp_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2707 | { |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2708 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
| 2709 | unsigned long flags; |
| 2710 | u32 imask; |
| 2711 | |
| 2712 | if (likely(napi_schedule_prep(&grp->napi_rx))) { |
| 2713 | spin_lock_irqsave(&grp->grplock, flags); |
| 2714 | imask = gfar_read(&grp->regs->imask); |
| 2715 | imask &= IMASK_RX_DISABLED; |
| 2716 | gfar_write(&grp->regs->imask, imask); |
| 2717 | spin_unlock_irqrestore(&grp->grplock, flags); |
| 2718 | __napi_schedule(&grp->napi_rx); |
| 2719 | } else { |
| 2720 | /* Clear IEVENT, so interrupts aren't called again |
| 2721 | * because of the packets that have already arrived. |
| 2722 | */ |
| 2723 | gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); |
| 2724 | } |
| 2725 | |
| 2726 | return IRQ_HANDLED; |
| 2727 | } |
| 2728 | |
| 2729 | /* Interrupt Handler for Transmit complete */ |
| 2730 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
| 2731 | { |
| 2732 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
| 2733 | unsigned long flags; |
| 2734 | u32 imask; |
| 2735 | |
| 2736 | if (likely(napi_schedule_prep(&grp->napi_tx))) { |
| 2737 | spin_lock_irqsave(&grp->grplock, flags); |
| 2738 | imask = gfar_read(&grp->regs->imask); |
| 2739 | imask &= IMASK_TX_DISABLED; |
| 2740 | gfar_write(&grp->regs->imask, imask); |
| 2741 | spin_unlock_irqrestore(&grp->grplock, flags); |
| 2742 | __napi_schedule(&grp->napi_tx); |
| 2743 | } else { |
| 2744 | /* Clear IEVENT, so interrupts aren't called again |
| 2745 | * because of the packets that have already arrived. |
| 2746 | */ |
| 2747 | gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); |
| 2748 | } |
| 2749 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2750 | return IRQ_HANDLED; |
| 2751 | } |
| 2752 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2753 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
| 2754 | { |
| 2755 | /* If valid headers were found, and valid sums |
| 2756 | * were verified, then we tell the kernel that no |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2757 | * checksumming is necessary. Otherwise, the stack must verify it (CHECKSUM_NONE)
| 2758 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 2759 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2760 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 2761 | else |
Eric Dumazet | bc8acf2 | 2010-09-02 13:07:41 -0700 | [diff] [blame] | 2762 | skb_checksum_none_assert(skb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2763 | } |
| 2764 | |
| 2765 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2766 | /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
Claudiu Manoil | 61db26c | 2013-02-14 05:00:05 +0000 | [diff] [blame] | 2767 | static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
| 2768 | int amount_pull, struct napi_struct *napi) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2769 | { |
| 2770 | struct gfar_private *priv = netdev_priv(dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2771 | struct rxfcb *fcb = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2772 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2773 | /* fcb is at the beginning, if it exists */
| 2774 | fcb = (struct rxfcb *)skb->data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2775 | |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2776 | /* Remove the FCB from the skb |
| 2777 | * Remove the padded bytes, if there are any |
| 2778 | */ |
Sandeep Gopalpet | f74dac0 | 2009-12-24 03:13:06 +0000 | [diff] [blame] | 2779 | if (amount_pull) { |
| 2780 | skb_record_rx_queue(skb, fcb->rq); |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2781 | skb_pull(skb, amount_pull); |
Sandeep Gopalpet | f74dac0 | 2009-12-24 03:13:06 +0000 | [diff] [blame] | 2782 | } |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2783 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 2784 | /* Get receive timestamp from the skb */ |
| 2785 | if (priv->hwts_rx_en) { |
| 2786 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); |
| 2787 | u64 *ns = (u64 *) skb->data; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2788 | |
Manfred Rudigier | cc772ab | 2010-04-08 23:10:03 +0000 | [diff] [blame] | 2789 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
| 2790 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); |
| 2791 | } |
| 2792 | |
| 2793 | if (priv->padding) |
| 2794 | skb_pull(skb, priv->padding); |
| 2795 | |
Michał Mirosław | 8b3afe9 | 2011-04-15 04:50:50 +0000 | [diff] [blame] | 2796 | if (dev->features & NETIF_F_RXCSUM) |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2797 | gfar_rx_checksum(skb, fcb); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2798 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2799 | /* Tell the skb what kind of packet this is */ |
| 2800 | skb->protocol = eth_type_trans(skb, dev); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 2801 | |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2802 | /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
David S. Miller | 823dcd2 | 2011-08-20 10:39:12 -0700 | [diff] [blame] | 2803 | * Even if vlan rx accel is disabled, on some chips |
| 2804 | * RXFCB_VLN is pseudo randomly set. |
| 2805 | */ |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2806 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && |
David S. Miller | 823dcd2 | 2011-08-20 10:39:12 -0700 | [diff] [blame] | 2807 | fcb->flags & RXFCB_VLN) |
David S. Miller | e5905c8 | 2013-04-22 19:24:19 -0400 | [diff] [blame] | 2808 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); |
Jiri Pirko | 87c288c | 2011-07-20 04:54:19 +0000 | [diff] [blame] | 2809 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2810 | /* Send the packet up the stack */ |
Claudiu Manoil | 953d276 | 2013-03-21 03:12:15 +0000 | [diff] [blame] | 2811 | napi_gro_receive(napi, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2812 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2813 | } |
| 2814 | |
| 2815 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring |
Jan Ceuleers | 2281a0f | 2012-06-05 03:42:11 +0000 | [diff] [blame] | 2816 | * until the budget/quota has been reached. Returns the number |
| 2817 | * of frames handled |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2818 | */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2819 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2820 | { |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2821 | struct net_device *dev = rx_queue->dev; |
Andy Fleming | 31de198 | 2008-12-16 15:33:40 -0800 | [diff] [blame] | 2822 | struct rxbd8 *bdp, *base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | struct sk_buff *skb; |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2824 | int pkt_len; |
| 2825 | int amount_pull; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2826 | int howmany = 0; |
| 2827 | struct gfar_private *priv = netdev_priv(dev); |
| 2828 | |
| 2829 | /* Get the first full descriptor */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2830 | bdp = rx_queue->cur_rx; |
| 2831 | base = rx_queue->rx_bd_base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2832 | |
Claudiu Manoil | ba77971 | 2013-02-14 05:00:07 +0000 | [diff] [blame] | 2833 | amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2834 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2835 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2836 | struct sk_buff *newskb; |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2837 | |
Scott Wood | 3b6330c | 2007-05-16 15:06:59 -0500 | [diff] [blame] | 2838 | rmb(); |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2839 | |
| 2840 | /* Add another skb for the future */ |
| 2841 | newskb = gfar_new_skb(dev); |
| 2842 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2843 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | |
Claudiu Manoil | 369ec16 | 2013-02-14 05:00:02 +0000 | [diff] [blame] | 2845 | dma_unmap_single(priv->dev, bdp->bufPtr, |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2846 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
Andy Fleming | 8118305 | 2008-11-12 10:07:11 -0600 | [diff] [blame] | 2847 | |
Anton Vorontsov | 63b88b9 | 2010-06-11 10:51:03 +0000 | [diff] [blame] | 2848 | if (unlikely(!(bdp->status & RXBD_ERR) && |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2849 | bdp->length > priv->rx_buffer_size)) |
Anton Vorontsov | 63b88b9 | 2010-06-11 10:51:03 +0000 | [diff] [blame] | 2850 | bdp->status = RXBD_LARGE; |
| 2851 | |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2852 | /* We drop the frame if we failed to allocate a new buffer */ |
| 2853 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2854 | bdp->status & RXBD_ERR)) { |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2855 | count_errors(bdp->status, dev); |
| 2856 | |
| 2857 | if (unlikely(!newskb)) |
| 2858 | newskb = skb; |
Eran Liberty | acbc0f0 | 2010-07-07 15:54:54 -0700 | [diff] [blame] | 2859 | else if (skb) |
Eric Dumazet | acb600d | 2012-10-05 06:23:55 +0000 | [diff] [blame] | 2860 | dev_kfree_skb(skb); |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2861 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2862 | /* Increment the number of packets */ |
Sandeep Gopalpet | a7f3804 | 2009-12-16 01:15:07 +0000 | [diff] [blame] | 2863 | rx_queue->stats.rx_packets++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2864 | howmany++; |
| 2865 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2866 | if (likely(skb)) { |
 | 2867 | /* Remove the FCS from the packet length */
 | 2868 | pkt_len = bdp->length - ETH_FCS_LEN;
| 2869 | skb_put(skb, pkt_len); |
Sandeep Gopalpet | a7f3804 | 2009-12-16 01:15:07 +0000 | [diff] [blame] | 2870 | rx_queue->stats.rx_bytes += pkt_len; |
Sandeep Gopalpet | f74dac0 | 2009-12-24 03:13:06 +0000 | [diff] [blame] | 2871 | skb_record_rx_queue(skb, rx_queue->qindex); |
Wu Jiajun-B06378 | cd754a5 | 2012-04-19 22:54:35 +0000 | [diff] [blame] | 2872 | gfar_process_frame(dev, skb, amount_pull, |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2873 | &rx_queue->grp->napi_rx); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2874 | |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2875 | } else { |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 2876 | netif_warn(priv, rx_err, dev, "Missing skb!\n"); |
Sandeep Gopalpet | a7f3804 | 2009-12-16 01:15:07 +0000 | [diff] [blame] | 2877 | rx_queue->stats.rx_dropped++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 2878 | atomic64_inc(&priv->extra_stats.rx_skbmissing); |
Dai Haruki | 2c2db48 | 2008-12-16 15:31:15 -0800 | [diff] [blame] | 2879 | } |
| 2880 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2881 | } |
| 2882 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2883 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2884 | |
Andy Fleming | 815b97c | 2008-04-22 17:18:29 -0500 | [diff] [blame] | 2885 | /* Setup the new bdp */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2886 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2887 | |
Matei Pavaluca | 45b679c9 | 2014-10-27 10:42:44 +0200 | [diff] [blame^] | 2888 | /* Update Last Free RxBD pointer for LFC */ |
| 2889 | if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) |
| 2890 | gfar_write(rx_queue->rfbptr, (u32)bdp); |
| 2891 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2892 | /* Update to the next pointer */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2893 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2894 | |
| 2895 | /* update to point at the next skb */ |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2896 | rx_queue->skb_currx = (rx_queue->skb_currx + 1) & |
| 2897 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2898 | } |
| 2899 | |
| 2900 | /* Update the current rxbd pointer to be the next one */ |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 2901 | rx_queue->cur_rx = bdp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2902 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2903 | return howmany; |
| 2904 | } |
| 2905 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2906 | static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2907 | { |
| 2908 | struct gfar_priv_grp *gfargrp = |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2909 | container_of(napi, struct gfar_priv_grp, napi_rx); |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2910 | struct gfar __iomem *regs = gfargrp->regs; |
Claudiu Manoil | 71ff9e3 | 2014-03-07 14:42:46 +0200 | [diff] [blame] | 2911 | struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2912 | int work_done = 0; |
| 2913 | |
| 2914 | /* Clear IEVENT, so interrupts aren't called again |
| 2915 | * because of the packets that have already arrived |
| 2916 | */ |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2917 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2918 | |
| 2919 | work_done = gfar_clean_rx_ring(rx_queue, budget); |
| 2920 | |
| 2921 | if (work_done < budget) { |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2922 | u32 imask; |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2923 | napi_complete(napi); |
| 2924 | /* Clear the halt bit in RSTAT */ |
| 2925 | gfar_write(®s->rstat, gfargrp->rstat); |
| 2926 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2927 | spin_lock_irq(&gfargrp->grplock); |
| 2928 | imask = gfar_read(®s->imask); |
| 2929 | imask |= IMASK_RX_DEFAULT; |
| 2930 | gfar_write(®s->imask, imask); |
| 2931 | spin_unlock_irq(&gfargrp->grplock); |
Claudiu Manoil | 5eaedf3 | 2013-06-10 20:19:48 +0300 | [diff] [blame] | 2932 | } |
| 2933 | |
| 2934 | return work_done; |
| 2935 | } |
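gfar_poll_rx_sq() follows the usual NAPI contract: consume at most budget frames, and only call napi_complete() and unmask IMASK once the ring is drained under budget. A toy user-space model of that contract, with made-up packet counts and no kernel APIs:

#include <stdio.h>

static int ring_pending = 13;	/* packets waiting in a fake RX ring */
static int irq_enabled;

static int fake_poll(int budget)
{
	int done = 0;

	while (done < budget && ring_pending > 0) {
		ring_pending--;	/* "process" one frame */
		done++;
	}

	if (done < budget)
		irq_enabled = 1;	/* napi_complete + IMASK unmask */

	return done;
}

int main(void)
{
	int budget = 8;

	irq_enabled = 0;	/* the interrupt handler masked the source */
	while (!irq_enabled)
		printf("poll handled %d frames\n", fake_poll(budget));
	printf("under budget: interrupts re-enabled\n");
	return 0;
}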
| 2936 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2937 | static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2938 | { |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 2939 | struct gfar_priv_grp *gfargrp = |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2940 | container_of(napi, struct gfar_priv_grp, napi_tx); |
| 2941 | struct gfar __iomem *regs = gfargrp->regs; |
Claudiu Manoil | 71ff9e3 | 2014-03-07 14:42:46 +0200 | [diff] [blame] | 2942 | struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2943 | u32 imask; |
| 2944 | |
| 2945 | /* Clear IEVENT, so interrupts aren't called again |
| 2946 | * because of the packets that have already arrived |
| 2947 | */ |
| 2948 | gfar_write(®s->ievent, IEVENT_TX_MASK); |
| 2949 | |
| 2950 | /* run Tx cleanup to completion */ |
| 2951 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) |
| 2952 | gfar_clean_tx_ring(tx_queue); |
| 2953 | |
| 2954 | napi_complete(napi); |
| 2955 | |
| 2956 | spin_lock_irq(&gfargrp->grplock); |
| 2957 | imask = gfar_read(®s->imask); |
| 2958 | imask |= IMASK_TX_DEFAULT; |
| 2959 | gfar_write(®s->imask, imask); |
| 2960 | spin_unlock_irq(&gfargrp->grplock); |
| 2961 | |
| 2962 | return 0; |
| 2963 | } |
| 2964 | |
| 2965 | static int gfar_poll_rx(struct napi_struct *napi, int budget) |
| 2966 | { |
| 2967 | struct gfar_priv_grp *gfargrp = |
| 2968 | container_of(napi, struct gfar_priv_grp, napi_rx); |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2969 | struct gfar_private *priv = gfargrp->priv; |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 2970 | struct gfar __iomem *regs = gfargrp->regs; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2971 | struct gfar_priv_rx_q *rx_queue = NULL; |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2972 | int work_done = 0, work_done_per_q = 0; |
Claudiu Manoil | 39c0a0d | 2013-03-21 03:12:13 +0000 | [diff] [blame] | 2973 | int i, budget_per_q = 0; |
Claudiu Manoil | 6be5ed3 | 2013-03-19 07:40:03 +0000 | [diff] [blame] | 2974 | unsigned long rstat_rxf; |
| 2975 | int num_act_queues; |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 2976 | |
Dai Haruki | 8c7396a | 2008-12-17 16:52:00 -0800 | [diff] [blame] | 2977 | /* Clear IEVENT, so interrupts aren't called again |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 2978 | * because of the packets that have already arrived |
| 2979 | */ |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 2980 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
Dai Haruki | 8c7396a | 2008-12-17 16:52:00 -0800 | [diff] [blame] | 2981 | |
Claudiu Manoil | 6be5ed3 | 2013-03-19 07:40:03 +0000 | [diff] [blame] | 2982 | rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; |
| 2983 | |
| 2984 | num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); |
| 2985 | if (num_act_queues) |
| 2986 | budget_per_q = budget/num_act_queues; |
| 2987 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2988 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
| 2989 | /* skip queue if not active */ |
| 2990 | if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) |
| 2991 | continue; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 2992 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2993 | rx_queue = priv->rx_queue[i]; |
| 2994 | work_done_per_q = |
| 2995 | gfar_clean_rx_ring(rx_queue, budget_per_q); |
| 2996 | work_done += work_done_per_q; |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 2997 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 2998 | /* finished processing this queue */ |
| 2999 | if (work_done_per_q < budget_per_q) { |
| 3000 | /* clear active queue hw indication */ |
| 3001 | gfar_write(®s->rstat, |
| 3002 | RSTAT_CLEAR_RXF0 >> i); |
| 3003 | num_act_queues--; |
Claudiu Manoil | 6be5ed3 | 2013-03-19 07:40:03 +0000 | [diff] [blame] | 3004 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 3005 | if (!num_act_queues) |
| 3006 | break; |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 3007 | } |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 3008 | } |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 3009 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 3010 | if (!num_act_queues) { |
| 3011 | u32 imask; |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 3012 | napi_complete(napi); |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 3013 | |
Claudiu Manoil | 3ba405d | 2013-10-14 17:05:09 +0300 | [diff] [blame] | 3014 | /* Clear the halt bit in RSTAT */ |
| 3015 | gfar_write(®s->rstat, gfargrp->rstat); |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 3016 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 3017 | spin_lock_irq(&gfargrp->grplock); |
| 3018 | imask = gfar_read(®s->imask); |
| 3019 | imask |= IMASK_RX_DEFAULT; |
| 3020 | gfar_write(®s->imask, imask); |
| 3021 | spin_unlock_irq(&gfargrp->grplock); |
Dai Haruki | d080cd6 | 2008-04-09 19:37:51 -0500 | [diff] [blame] | 3022 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3023 | |
Claudiu Manoil | c233cf40 | 2013-03-19 07:40:02 +0000 | [diff] [blame] | 3024 | return work_done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3025 | } |
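In the multi-queue variant the budget is divided evenly across the RX queues whose RSTAT bits are set, and a queue that finishes under its share is treated as drained. A small sketch of that split with hypothetical numbers (any integer-division remainder simply goes unused for this pass):

#include <stdio.h>

int main(void)
{
	int budget = 64;
	int pending[3] = { 30, 5, 12 };	/* frames waiting per active queue */
	int num_act_queues = 3;
	int budget_per_q = budget / num_act_queues;	/* 21 each */
	int i, work_done = 0;

	for (i = 0; i < 3; i++) {
		int done = pending[i] < budget_per_q ? pending[i] : budget_per_q;

		work_done += done;
		if (done < budget_per_q)
			printf("queue %d drained (its RSTAT bit would be cleared)\n", i);
	}
	printf("work_done=%d of budget=%d\n", work_done, budget);
	return 0;
}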
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3026 | |
Claudiu Manoil | aeb12c5 | 2014-03-07 14:42:45 +0200 | [diff] [blame] | 3027 | static int gfar_poll_tx(struct napi_struct *napi, int budget) |
| 3028 | { |
| 3029 | struct gfar_priv_grp *gfargrp = |
| 3030 | container_of(napi, struct gfar_priv_grp, napi_tx); |
| 3031 | struct gfar_private *priv = gfargrp->priv; |
| 3032 | struct gfar __iomem *regs = gfargrp->regs; |
| 3033 | struct gfar_priv_tx_q *tx_queue = NULL; |
| 3034 | int has_tx_work = 0; |
| 3035 | int i; |
| 3036 | |
| 3037 | /* Clear IEVENT, so interrupts aren't called again |
| 3038 | * because of the packets that have already arrived |
| 3039 | */ |
| 3040 | gfar_write(®s->ievent, IEVENT_TX_MASK); |
| 3041 | |
| 3042 | for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { |
| 3043 | tx_queue = priv->tx_queue[i]; |
| 3044 | /* run Tx cleanup to completion */ |
| 3045 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { |
| 3046 | gfar_clean_tx_ring(tx_queue); |
| 3047 | has_tx_work = 1; |
| 3048 | } |
| 3049 | } |
| 3050 | |
| 3051 | if (!has_tx_work) { |
| 3052 | u32 imask; |
| 3053 | napi_complete(napi); |
| 3054 | |
| 3055 | spin_lock_irq(&gfargrp->grplock); |
| 3056 | imask = gfar_read(®s->imask); |
| 3057 | imask |= IMASK_TX_DEFAULT; |
| 3058 | gfar_write(®s->imask, imask); |
| 3059 | spin_unlock_irq(&gfargrp->grplock); |
| 3060 | } |
| 3061 | |
| 3062 | return 0; |
| 3063 | } |
| 3064 | |
| 3065 | |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3066 | #ifdef CONFIG_NET_POLL_CONTROLLER |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3067 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3068 | * without having to re-enable interrupts. It's not called while |
| 3069 | * the interrupt routine is executing. |
| 3070 | */ |
| 3071 | static void gfar_netpoll(struct net_device *dev) |
| 3072 | { |
| 3073 | struct gfar_private *priv = netdev_priv(dev); |
Jan Ceuleers | 3a2e16c | 2012-06-05 03:42:14 +0000 | [diff] [blame] | 3074 | int i; |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3075 | |
| 3076 | /* If the device has multiple interrupts, run tx/rx */ |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3077 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3078 | for (i = 0; i < priv->num_grps; i++) { |
Paul Gortmaker | 62ed839 | 2013-02-24 05:38:31 +0000 | [diff] [blame] | 3079 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
| 3080 | |
| 3081 | disable_irq(gfar_irq(grp, TX)->irq); |
| 3082 | disable_irq(gfar_irq(grp, RX)->irq); |
| 3083 | disable_irq(gfar_irq(grp, ER)->irq); |
| 3084 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
| 3085 | enable_irq(gfar_irq(grp, ER)->irq); |
| 3086 | enable_irq(gfar_irq(grp, RX)->irq); |
| 3087 | enable_irq(gfar_irq(grp, TX)->irq); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3088 | } |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3089 | } else { |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3090 | for (i = 0; i < priv->num_grps; i++) { |
Paul Gortmaker | 62ed839 | 2013-02-24 05:38:31 +0000 | [diff] [blame] | 3091 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
| 3092 | |
| 3093 | disable_irq(gfar_irq(grp, TX)->irq); |
| 3094 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); |
| 3095 | enable_irq(gfar_irq(grp, TX)->irq); |
Anton Vorontsov | 43de004 | 2009-12-09 02:52:19 -0800 | [diff] [blame] | 3096 | } |
Vitaly Wool | f2d71c2 | 2006-11-07 13:27:02 +0300 | [diff] [blame] | 3097 | } |
| 3098 | } |
| 3099 | #endif |
| 3100 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3101 | /* The interrupt handler for devices with one interrupt */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3102 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3103 | { |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3104 | struct gfar_priv_grp *gfargrp = grp_id; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3105 | |
| 3106 | /* Save ievent for future reference */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3107 | u32 events = gfar_read(&gfargrp->regs->ievent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3109 | /* Check for reception */ |
Sergei Shtylyov | 538cc7e | 2007-02-15 17:56:01 +0400 | [diff] [blame] | 3110 | if (events & IEVENT_RX_MASK) |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3111 | gfar_receive(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3112 | |
| 3113 | /* Check for transmit completion */ |
Sergei Shtylyov | 538cc7e | 2007-02-15 17:56:01 +0400 | [diff] [blame] | 3114 | if (events & IEVENT_TX_MASK) |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3115 | gfar_transmit(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3116 | |
Sergei Shtylyov | 538cc7e | 2007-02-15 17:56:01 +0400 | [diff] [blame] | 3117 | /* Check for errors */ |
| 3118 | if (events & IEVENT_ERR_MASK) |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3119 | gfar_error(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3120 | |
| 3121 | return IRQ_HANDLED; |
| 3122 | } |
| 3123 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3124 | /* Called every time the controller might need to be made |
| 3125 | * aware of new link state. The PHY code conveys this |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3126 | * information through variables in the phydev structure, and this |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3127 | * function converts those variables into the appropriate |
| 3128 | * register values, and can bring down the device if needed. |
| 3129 | */ |
| 3130 | static void adjust_link(struct net_device *dev) |
| 3131 | { |
| 3132 | struct gfar_private *priv = netdev_priv(dev); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3133 | struct phy_device *phydev = priv->phydev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3134 | |
Claudiu Manoil | 6ce29b0 | 2014-04-30 14:27:21 +0300 | [diff] [blame] | 3135 | if (unlikely(phydev->link != priv->oldlink || |
| 3136 | phydev->duplex != priv->oldduplex || |
| 3137 | phydev->speed != priv->oldspeed)) |
| 3138 | gfar_update_link_state(priv); |
Andy Fleming | bb40dcb | 2005-09-23 22:54:21 -0400 | [diff] [blame] | 3139 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3140 | |
| 3141 | /* Update the hash table based on the current list of multicast |
| 3142 | * addresses we subscribe to. Also, change the promiscuity of |
| 3143 | * the device based on the flags (this function is called |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3144 | * whenever dev->flags is changed)
| 3145 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3146 | static void gfar_set_multi(struct net_device *dev) |
| 3147 | { |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3148 | struct netdev_hw_addr *ha; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3149 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3150 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3151 | u32 tempval; |
| 3152 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 3153 | if (dev->flags & IFF_PROMISC) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3154 | /* Set RCTRL to PROM */ |
| 3155 | tempval = gfar_read(®s->rctrl); |
| 3156 | tempval |= RCTRL_PROM; |
| 3157 | gfar_write(®s->rctrl, tempval); |
| 3158 | } else { |
| 3159 | /* Set RCTRL to not PROM */ |
| 3160 | tempval = gfar_read(®s->rctrl); |
| 3161 | tempval &= ~(RCTRL_PROM); |
| 3162 | gfar_write(®s->rctrl, tempval); |
| 3163 | } |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 3164 | |
Sandeep Gopalpet | a12f801 | 2009-11-02 07:03:00 +0000 | [diff] [blame] | 3165 | if (dev->flags & IFF_ALLMULTI) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3166 | /* Set the hash to rx all multicast frames */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3167 | gfar_write(®s->igaddr0, 0xffffffff); |
| 3168 | gfar_write(®s->igaddr1, 0xffffffff); |
| 3169 | gfar_write(®s->igaddr2, 0xffffffff); |
| 3170 | gfar_write(®s->igaddr3, 0xffffffff); |
| 3171 | gfar_write(®s->igaddr4, 0xffffffff); |
| 3172 | gfar_write(®s->igaddr5, 0xffffffff); |
| 3173 | gfar_write(®s->igaddr6, 0xffffffff); |
| 3174 | gfar_write(®s->igaddr7, 0xffffffff); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3175 | gfar_write(®s->gaddr0, 0xffffffff); |
| 3176 | gfar_write(®s->gaddr1, 0xffffffff); |
| 3177 | gfar_write(®s->gaddr2, 0xffffffff); |
| 3178 | gfar_write(®s->gaddr3, 0xffffffff); |
| 3179 | gfar_write(®s->gaddr4, 0xffffffff); |
| 3180 | gfar_write(®s->gaddr5, 0xffffffff); |
| 3181 | gfar_write(®s->gaddr6, 0xffffffff); |
| 3182 | gfar_write(®s->gaddr7, 0xffffffff); |
| 3183 | } else { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3184 | int em_num; |
| 3185 | int idx; |
| 3186 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3187 | /* zero out the hash */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3188 | gfar_write(®s->igaddr0, 0x0); |
| 3189 | gfar_write(®s->igaddr1, 0x0); |
| 3190 | gfar_write(®s->igaddr2, 0x0); |
| 3191 | gfar_write(®s->igaddr3, 0x0); |
| 3192 | gfar_write(®s->igaddr4, 0x0); |
| 3193 | gfar_write(®s->igaddr5, 0x0); |
| 3194 | gfar_write(®s->igaddr6, 0x0); |
| 3195 | gfar_write(®s->igaddr7, 0x0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3196 | gfar_write(®s->gaddr0, 0x0); |
| 3197 | gfar_write(®s->gaddr1, 0x0); |
| 3198 | gfar_write(®s->gaddr2, 0x0); |
| 3199 | gfar_write(®s->gaddr3, 0x0); |
| 3200 | gfar_write(®s->gaddr4, 0x0); |
| 3201 | gfar_write(®s->gaddr5, 0x0); |
| 3202 | gfar_write(®s->gaddr6, 0x0); |
| 3203 | gfar_write(®s->gaddr7, 0x0); |
| 3204 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3205 | /* If we have extended hash tables, we need to |
| 3206 | * clear the exact match registers to prepare for |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3207 | * setting them |
| 3208 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3209 | if (priv->extended_hash) { |
| 3210 | em_num = GFAR_EM_NUM + 1; |
| 3211 | gfar_clear_exact_match(dev); |
| 3212 | idx = 1; |
| 3213 | } else { |
| 3214 | idx = 0; |
| 3215 | em_num = 0; |
| 3216 | } |
| 3217 | |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 3218 | if (netdev_mc_empty(dev)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3219 | return; |
| 3220 | |
| 3221 | /* Parse the list, and set the appropriate bits */ |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3222 | netdev_for_each_mc_addr(ha, dev) { |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3223 | if (idx < em_num) { |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3224 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3225 | idx++; |
| 3226 | } else |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 3227 | gfar_set_hash_for_addr(dev, ha->addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3228 | } |
| 3229 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3230 | } |
| 3231 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3232 | |
| 3233 | /* Clears each of the exact match registers to zero, so they |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3234 | * don't interfere with normal reception |
| 3235 | */ |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3236 | static void gfar_clear_exact_match(struct net_device *dev) |
| 3237 | { |
| 3238 | int idx; |
Joe Perches | 6a3c910c | 2011-11-16 09:38:02 +0000 | [diff] [blame] | 3239 | static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3240 | |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 3241 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
Joe Perches | b6bc765 | 2010-12-21 02:16:08 -0800 | [diff] [blame] | 3242 | gfar_set_mac_for_addr(dev, idx, zero_arr); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3243 | } |
| 3244 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3245 | /* Set the appropriate hash bit for the given addr */ |
| 3246 | /* The algorithm works like so: |
| 3247 | * 1) Take the Destination Address (ie the multicast address), and |
| 3248 | * do a CRC on it (little endian), and reverse the bits of the |
| 3249 | * result. |
| 3250 | * 2) Use the 8 most significant bits as a hash into a 256-entry |
| 3251 | * table. The table is controlled through 8 32-bit registers: |
| 3252 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is |
 | 3253 | * entry 255. This means that the 3 most significant bits of the
 | 3254 | * hash index select which gaddr register to use, and the 5 other bits
| 3255 | * indicate which bit (assuming an IBM numbering scheme, which |
| 3256 | * for PowerPC (tm) is usually the case) in the register holds |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3257 | * the entry. |
| 3258 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3259 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) |
| 3260 | { |
| 3261 | u32 tempval; |
| 3262 | struct gfar_private *priv = netdev_priv(dev); |
Joe Perches | 6a3c910c | 2011-11-16 09:38:02 +0000 | [diff] [blame] | 3263 | u32 result = ether_crc(ETH_ALEN, addr); |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3264 | int width = priv->hash_width; |
| 3265 | u8 whichbit = (result >> (32 - width)) & 0x1f; |
| 3266 | u8 whichreg = result >> (32 - width + 5); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3267 | u32 value = (1 << (31-whichbit)); |
| 3268 | |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3269 | tempval = gfar_read(priv->hash_regs[whichreg]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3270 | tempval |= value; |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3271 | gfar_write(priv->hash_regs[whichreg], tempval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3272 | } |
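A worked, user-space example of the selection math in gfar_set_hash_for_addr(); the CRC value is made up rather than computed by ether_crc(), and the full 8-bit hash width is assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t result = 0xd3a40000;	/* hypothetical ether_crc() output */
	int width = 8;			/* hash width for the 256-bin table */
	uint8_t whichbit = (result >> (32 - width)) & 0x1f;
	uint8_t whichreg = result >> (32 - width + 5);
	uint32_t value = 1u << (31 - whichbit);

	/* top 3 bits of the hash pick gaddr0-7, next 5 pick the bit */
	printf("hash=0x%02x -> gaddr%u, register bit mask 0x%08x\n",
	       (unsigned)(result >> (32 - width)), (unsigned)whichreg,
	       (unsigned)value);
	return 0;
}

For result = 0xd3a40000 this prints gaddr6 and mask 0x00001000, i.e. bit 19 (counting from the MSB) of the seventh group address register.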
| 3273 | |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3274 | |
| 3275 | /* There are multiple MAC Address register pairs on some controllers |
| 3276 | * This function sets the numth pair to a given address |
| 3277 | */ |
Joe Perches | b6bc765 | 2010-12-21 02:16:08 -0800 | [diff] [blame] | 3278 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
| 3279 | const u8 *addr) |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3280 | { |
| 3281 | struct gfar_private *priv = netdev_priv(dev); |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3282 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3283 | u32 tempval; |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3284 | u32 __iomem *macptr = ®s->macstnaddr1; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3285 | |
| 3286 | macptr += num*2; |
| 3287 | |
Claudiu Manoil | 83bfc3c | 2014-10-07 10:44:33 +0300 | [diff] [blame] | 3288 | /* For a station address of 0x12345678ABCD in transmission |
| 3289 | * order (BE), MACnADDR1 is set to 0xCDAB7856 and |
| 3290 | * MACnADDR2 is set to 0x34120000. |
Jan Ceuleers | 0977f81 | 2012-06-05 03:42:12 +0000 | [diff] [blame] | 3291 | */ |
Claudiu Manoil | 83bfc3c | 2014-10-07 10:44:33 +0300 | [diff] [blame] | 3292 | tempval = (addr[5] << 24) | (addr[4] << 16) | |
| 3293 | (addr[3] << 8) | addr[2]; |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3294 | |
Claudiu Manoil | 83bfc3c | 2014-10-07 10:44:33 +0300 | [diff] [blame] | 3295 | gfar_write(macptr, tempval); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3296 | |
Claudiu Manoil | 83bfc3c | 2014-10-07 10:44:33 +0300 | [diff] [blame] | 3297 | tempval = (addr[1] << 24) | (addr[0] << 16); |
Andy Fleming | 7f7f531 | 2005-11-11 12:38:59 -0600 | [diff] [blame] | 3298 | |
| 3299 | gfar_write(macptr+1, tempval); |
| 3300 | } |
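A quick user-space check of the byte ordering described in the comment above, using the example station address 12:34:56:78:AB:CD; plain variables stand in for the MACnADDR registers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xab, 0xcd };
	uint32_t macstnaddr1 = ((uint32_t)addr[5] << 24) |
			       ((uint32_t)addr[4] << 16) |
			       ((uint32_t)addr[3] << 8) | addr[2];
	uint32_t macstnaddr2 = ((uint32_t)addr[1] << 24) |
			       ((uint32_t)addr[0] << 16);

	/* prints MACnADDR1=0xcdab7856 MACnADDR2=0x34120000 */
	printf("MACnADDR1=0x%08x MACnADDR2=0x%08x\n",
	       (unsigned)macstnaddr1, (unsigned)macstnaddr2);
	return 0;
}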
| 3301 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3302 | /* GFAR error interrupt handler */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3303 | static irqreturn_t gfar_error(int irq, void *grp_id) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3304 | { |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3305 | struct gfar_priv_grp *gfargrp = grp_id; |
| 3306 | struct gfar __iomem *regs = gfargrp->regs; |
| 3307 | struct gfar_private *priv= gfargrp->priv; |
| 3308 | struct net_device *dev = priv->ndev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3309 | |
| 3310 | /* Save ievent for future reference */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3311 | u32 events = gfar_read(®s->ievent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3312 | |
| 3313 | /* Clear IEVENT */ |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3314 | gfar_write(®s->ievent, events & IEVENT_ERR_MASK); |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 3315 | |
| 3316 | /* Magic Packet is not an error. */ |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3317 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
Scott Wood | d87eb12 | 2008-07-11 18:04:45 -0500 | [diff] [blame] | 3318 | (events & IEVENT_MAG)) |
| 3319 | events &= ~IEVENT_MAG; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3320 | |
| 3321 | /* Hmm... */ |
Kumar Gala | 0bbaf06 | 2005-06-20 10:54:21 -0500 | [diff] [blame] | 3322 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
Jan Ceuleers | bc4598b | 2012-06-05 03:42:13 +0000 | [diff] [blame] | 3323 | netdev_dbg(dev, |
| 3324 | "error interrupt (ievent=0x%08x imask=0x%08x)\n", |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3325 | events, gfar_read(®s->imask)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3326 | |
| 3327 | /* Update the error counters */ |
| 3328 | if (events & IEVENT_TXE) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3329 | dev->stats.tx_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3330 | |
| 3331 | if (events & IEVENT_LC) |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3332 | dev->stats.tx_window_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3333 | if (events & IEVENT_CRL) |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3334 | dev->stats.tx_aborted_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3335 | if (events & IEVENT_XFUN) { |
Anton Vorontsov | 836cf7f | 2009-11-10 14:11:08 +0000 | [diff] [blame] | 3336 | unsigned long flags; |
| 3337 | |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3338 | netif_dbg(priv, tx_err, dev, |
| 3339 | "TX FIFO underrun, packet dropped\n"); |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3340 | dev->stats.tx_dropped++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3341 | atomic64_inc(&priv->extra_stats.tx_underrun); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3342 | |
Anton Vorontsov | 836cf7f | 2009-11-10 14:11:08 +0000 | [diff] [blame] | 3343 | local_irq_save(flags); |
| 3344 | lock_tx_qs(priv); |
| 3345 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3346 | /* Reactivate the Tx Queues */ |
Sandeep Gopalpet | fba4ed0 | 2009-11-02 07:03:15 +0000 | [diff] [blame] | 3347 | gfar_write(®s->tstat, gfargrp->tstat); |
Anton Vorontsov | 836cf7f | 2009-11-10 14:11:08 +0000 | [diff] [blame] | 3348 | |
| 3349 | unlock_tx_qs(priv); |
| 3350 | local_irq_restore(flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3351 | } |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3352 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3353 | } |
| 3354 | if (events & IEVENT_BSY) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3355 | dev->stats.rx_errors++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3356 | atomic64_inc(&priv->extra_stats.rx_bsy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3357 | |
Sandeep Gopalpet | f498370 | 2009-11-02 07:03:09 +0000 | [diff] [blame] | 3358 | gfar_receive(irq, grp_id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3359 | |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3360 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
| 3361 | gfar_read(®s->rstat)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3362 | } |
| 3363 | if (events & IEVENT_BABR) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 3364 | dev->stats.rx_errors++; |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3365 | atomic64_inc(&priv->extra_stats.rx_babr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3366 | |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3367 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3368 | } |
| 3369 | if (events & IEVENT_EBERR) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3370 | atomic64_inc(&priv->extra_stats.eberr); |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3371 | netif_dbg(priv, rx_err, dev, "bus error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3372 | } |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3373 | if (events & IEVENT_RXC) |
| 3374 | netif_dbg(priv, rx_status, dev, "control frame\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3375 | |
| 3376 | if (events & IEVENT_BABT) { |
Paul Gortmaker | 212079d | 2013-02-12 15:38:19 -0500 | [diff] [blame] | 3377 | atomic64_inc(&priv->extra_stats.tx_babt); |
Joe Perches | 59deab2 | 2011-06-14 08:57:47 +0000 | [diff] [blame] | 3378 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3379 | } |
| 3380 | return IRQ_HANDLED; |
| 3381 | } |
| 3382 | |
Claudiu Manoil | 6ce29b0 | 2014-04-30 14:27:21 +0300 | [diff] [blame] | 3383 | static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) |
| 3384 | { |
| 3385 | struct phy_device *phydev = priv->phydev; |
| 3386 | u32 val = 0; |
| 3387 | |
| 3388 | if (!phydev->duplex) |
| 3389 | return val; |
| 3390 | |
| 3391 | if (!priv->pause_aneg_en) { |
| 3392 | if (priv->tx_pause_en) |
| 3393 | val |= MACCFG1_TX_FLOW; |
| 3394 | if (priv->rx_pause_en) |
| 3395 | val |= MACCFG1_RX_FLOW; |
| 3396 | } else { |
| 3397 | u16 lcl_adv, rmt_adv; |
| 3398 | u8 flowctrl; |
| 3399 | /* get link partner capabilities */ |
| 3400 | rmt_adv = 0; |
| 3401 | if (phydev->pause) |
| 3402 | rmt_adv = LPA_PAUSE_CAP; |
| 3403 | if (phydev->asym_pause) |
| 3404 | rmt_adv |= LPA_PAUSE_ASYM; |
| 3405 | |
Pavaluca Matei-B46610 | 43ef8d2 | 2014-10-27 10:42:43 +0200 | [diff] [blame] | 3406 | lcl_adv = 0; |
| 3407 | if (phydev->advertising & ADVERTISED_Pause) |
| 3408 | lcl_adv |= ADVERTISE_PAUSE_CAP; |
| 3409 | if (phydev->advertising & ADVERTISED_Asym_Pause) |
| 3410 | lcl_adv |= ADVERTISE_PAUSE_ASYM; |
Claudiu Manoil | 6ce29b0 | 2014-04-30 14:27:21 +0300 | [diff] [blame] | 3411 | |
| 3412 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); |
| 3413 | if (flowctrl & FLOW_CTRL_TX) |
| 3414 | val |= MACCFG1_TX_FLOW; |
| 3415 | if (flowctrl & FLOW_CTRL_RX) |
| 3416 | val |= MACCFG1_RX_FLOW; |
| 3417 | } |
| 3418 | |
| 3419 | return val; |
| 3420 | } |
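gfar_get_flowctrl_cfg() relies on mii_resolve_flowctrl_fdx() for the 802.3 full-duplex pause resolution. The sketch below restates that resolution rule in user space for illustration, with local stand-in constants rather than the kernel's ADVERTISE_*/LPA_* values; treat the helper here as an assumption about the behaviour, not the kernel's source:

#include <stdio.h>

enum { PAUSE_CAP = 1 << 0, PAUSE_ASYM = 1 << 1 };	/* local stand-ins */
enum { FC_TX = 1 << 0, FC_RX = 1 << 1 };

/* Assumed 802.3 Annex 28B full-duplex pause resolution: symmetric
 * pause on both sides enables both directions; otherwise asymmetric
 * pause decides which single direction is enabled.
 */
static int resolve_flowctrl_fdx(int lcl, int rmt)
{
	if (lcl & rmt & PAUSE_CAP)
		return FC_TX | FC_RX;
	if (lcl & rmt & PAUSE_ASYM) {
		if (lcl & PAUSE_CAP)
			return FC_RX;
		if (rmt & PAUSE_CAP)
			return FC_TX;
	}
	return 0;
}

int main(void)
{
	int lcl = PAUSE_CAP | PAUSE_ASYM;	/* what we advertise */
	int rmt = PAUSE_ASYM;			/* what the link partner advertises */
	int fc = resolve_flowctrl_fdx(lcl, rmt);

	/* prints: tx pause off, rx pause on */
	printf("tx pause %s, rx pause %s\n",
	       (fc & FC_TX) ? "on" : "off", (fc & FC_RX) ? "on" : "off");
	return 0;
}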
| 3421 | |
| 3422 | static noinline void gfar_update_link_state(struct gfar_private *priv) |
| 3423 | { |
| 3424 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 3425 | struct phy_device *phydev = priv->phydev; |
Matei Pavaluca | 45b679c9 | 2014-10-27 10:42:44 +0200 | [diff] [blame^] | 3426 | struct gfar_priv_rx_q *rx_queue = NULL; |
| 3427 | int i; |
| 3428 | struct rxbd8 *bdp; |
Claudiu Manoil | 6ce29b0 | 2014-04-30 14:27:21 +0300 | [diff] [blame] | 3429 | |
| 3430 | if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) |
| 3431 | return; |
| 3432 | |
| 3433 | if (phydev->link) { |
| 3434 | u32 tempval1 = gfar_read(®s->maccfg1); |
| 3435 | u32 tempval = gfar_read(®s->maccfg2); |
| 3436 | u32 ecntrl = gfar_read(®s->ecntrl); |
Matei Pavaluca | 45b679c9 | 2014-10-27 10:42:44 +0200 | [diff] [blame^] | 3437 | u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); |
Claudiu Manoil | 6ce29b0 | 2014-04-30 14:27:21 +0300 | [diff] [blame] | 3438 | |
| 3439 | if (phydev->duplex != priv->oldduplex) { |
| 3440 | if (!(phydev->duplex)) |
| 3441 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
| 3442 | else |
| 3443 | tempval |= MACCFG2_FULL_DUPLEX; |
| 3444 | |
| 3445 | priv->oldduplex = phydev->duplex; |
| 3446 | } |
| 3447 | |
| 3448 | if (phydev->speed != priv->oldspeed) { |
| 3449 | switch (phydev->speed) { |
| 3450 | case 1000: |
| 3451 | tempval = |
| 3452 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); |
| 3453 | |
| 3454 | ecntrl &= ~(ECNTRL_R100); |
| 3455 | break; |
| 3456 | case 100: |
| 3457 | case 10: |
| 3458 | tempval = |
| 3459 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); |
| 3460 | |
| 3461 | /* Reduced mode distinguishes |
| 3462 | * between 10 and 100 |
| 3463 | */ |
| 3464 | if (phydev->speed == SPEED_100) |
| 3465 | ecntrl |= ECNTRL_R100; |
| 3466 | else |
| 3467 | ecntrl &= ~(ECNTRL_R100); |
| 3468 | break; |
| 3469 | default: |
| 3470 | netif_warn(priv, link, priv->ndev, |
| 3471 | "Ack! Speed (%d) is not 10/100/1000!\n", |
| 3472 | phydev->speed); |
| 3473 | break; |
| 3474 | } |
| 3475 | |
| 3476 | priv->oldspeed = phydev->speed; |
| 3477 | } |
| 3478 | |
| 3479 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
| 3480 | tempval1 |= gfar_get_flowctrl_cfg(priv); |
| 3481 | |
Matei Pavaluca | 45b679c9 | 2014-10-27 10:42:44 +0200 | [diff] [blame^] | 3482 | /* Turn last free buffer recording on */ |
| 3483 | if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { |
| 3484 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 3485 | rx_queue = priv->rx_queue[i]; |
| 3486 | bdp = rx_queue->cur_rx; |
| 3487 | /* skip to previous bd */ |
| 3488 | bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, |
| 3489 | rx_queue->rx_bd_base, |
| 3490 | rx_queue->rx_ring_size); |
| 3491 | |
| 3492 | if (rx_queue->rfbptr) |
| 3493 | gfar_write(rx_queue->rfbptr, (u32)bdp); |
| 3494 | } |
| 3495 | |
| 3496 | priv->tx_actual_en = 1; |
| 3497 | } |
| 3498 | |
| 3499 | if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) |
| 3500 | priv->tx_actual_en = 0; |
| 3501 | |
Claudiu Manoil | 6ce29b0 | 2014-04-30 14:27:21 +0300 | [diff] [blame] | 3502 | gfar_write(®s->maccfg1, tempval1); |
| 3503 | gfar_write(®s->maccfg2, tempval); |
| 3504 | gfar_write(®s->ecntrl, ecntrl); |
| 3505 | |
| 3506 | if (!priv->oldlink) |
| 3507 | priv->oldlink = 1; |
| 3508 | |
| 3509 | } else if (priv->oldlink) { |
| 3510 | priv->oldlink = 0; |
| 3511 | priv->oldspeed = 0; |
| 3512 | priv->oldduplex = -1; |
| 3513 | } |
| 3514 | |
| 3515 | if (netif_msg_link(priv)) |
| 3516 | phy_print_status(phydev); |
| 3517 | } |
| 3518 | |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3519 | static struct of_device_id gfar_match[] = |
| 3520 | { |
| 3521 | { |
| 3522 | .type = "network", |
| 3523 | .compatible = "gianfar", |
| 3524 | }, |
Sandeep Gopalpet | 46ceb60 | 2009-11-02 07:03:34 +0000 | [diff] [blame] | 3525 | { |
| 3526 | .compatible = "fsl,etsec2", |
| 3527 | }, |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3528 | {}, |
| 3529 | }; |
Anton Vorontsov | e72701a | 2009-10-14 14:54:52 -0700 | [diff] [blame] | 3530 | MODULE_DEVICE_TABLE(of, gfar_match); |
Andy Fleming | b31a1d8 | 2008-12-16 15:29:15 -0800 | [diff] [blame] | 3531 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3532 | /* Structure for a device driver */ |
Grant Likely | 7488876 | 2011-02-22 21:05:51 -0700 | [diff] [blame] | 3533 | static struct platform_driver gfar_driver = { |
Grant Likely | 4018294 | 2010-04-13 16:13:02 -0700 | [diff] [blame] | 3534 | .driver = { |
| 3535 | .name = "fsl-gianfar", |
| 3536 | .owner = THIS_MODULE, |
| 3537 | .pm = GFAR_PM_OPS, |
| 3538 | .of_match_table = gfar_match, |
| 3539 | }, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3540 | .probe = gfar_probe, |
| 3541 | .remove = gfar_remove, |
| 3542 | }; |
| 3543 | |
Axel Lin | db62f68 | 2011-11-27 16:44:17 +0000 | [diff] [blame] | 3544 | module_platform_driver(gfar_driver); |