blob: 94a8dc5935e701aa1e61b1f535bf605a4d5c4dcc [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Paul Gortmaker3396c782012-01-27 13:36:01 +00002 * drivers/net/ethernet/freescale/gianfar_ethtool.c
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
4 * Gianfar Ethernet Driver
5 * Ethtool support for Gianfar Enet
6 * Based on e1000 ethtool support
7 *
8 * Author: Andy Fleming
Kumar Gala4c8d3d92005-11-13 16:06:30 -08009 * Maintainer: Kumar Gala
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +000010 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +000012 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 *
Jeff Garzik6aa20a22006-09-13 13:24:59 -040014 * This software may be used and distributed according to
15 * the terms of the GNU Public License, Version 2, incorporated herein
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 * by reference.
17 */
18
Joe Perches59deab22011-06-14 08:57:47 +000019#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/string.h>
23#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
David S. Miller65a85a82012-04-06 00:35:34 -040028#include <linux/net_tstamp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32
33#include <asm/io.h>
34#include <asm/irq.h>
35#include <asm/uaccess.h>
36#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/crc32.h>
38#include <asm/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/ethtool.h>
Andy Flemingbb40dcb2005-09-23 22:54:21 -040040#include <linux/mii.h>
41#include <linux/phy.h>
Sebastian Poehn4aa3a712011-06-20 13:57:59 -070042#include <linux/sort.h>
Sebastian Poehn380b1532011-07-07 04:30:29 -070043#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
45#include "gianfar.h"
46
/* Upper bounds accepted for interrupt-coalescing parameters via ethtool */
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
/* Forward declarations for the ethtool_ops callbacks defined below */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
/* Names reported to ethtool for each statistics counter.  Order must
 * match the layout of priv->extra_stats followed by the hardware RMON
 * counter block (see gfar_fill_stats below).
 */
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	/* extra stats */
	"rx-allocation-errors",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"tx-timeout-errors",
	/* rmon stats */
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};
124
Kumar Gala0bbaf062005-06-20 10:54:21 -0500125/* Fill in a buffer with the strings which correspond to the
126 * stats */
127static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
128{
129 struct gfar_private *priv = netdev_priv(dev);
Andy Fleming7f7f5312005-11-11 12:38:59 -0600130
Andy Flemingb31a1d82008-12-16 15:29:15 -0800131 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
Kumar Gala0bbaf062005-06-20 10:54:21 -0500132 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
133 else
134 memcpy(buf, stat_gstrings,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000135 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
Kumar Gala0bbaf062005-06-20 10:54:21 -0500136}
137
/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	/* extra_stats is laid out as an array of atomic64_t counters */
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	/* Software-maintained counters come first */
	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		/* Then the hardware RMON registers; i carries over from
		 * the loop above so the two ranges are contiguous in buf.
		 */
		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}
160
Jeff Garzikb9f2c042007-10-03 18:07:32 -0700161static int gfar_sset_count(struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164
Jeff Garzikb9f2c042007-10-03 18:07:32 -0700165 switch (sset) {
166 case ETH_SS_STATS:
Andy Flemingb31a1d82008-12-16 15:29:15 -0800167 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
Jeff Garzikb9f2c042007-10-03 18:07:32 -0700168 return GFAR_STATS_LEN;
169 else
170 return GFAR_EXTRA_STATS_LEN;
171 default:
172 return -EOPNOTSUPP;
173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174}
175
/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	/* No firmware or bus identification is exposed for this device */
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
186
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400187
Philippe Reynes0d1bcdc2016-05-01 17:08:08 +0200188static int gfar_set_ksettings(struct net_device *dev,
189 const struct ethtool_link_ksettings *cmd)
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400190{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200191 struct phy_device *phydev = dev->phydev;
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400192
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200193 if (!phydev)
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400194 return -ENODEV;
195
Philippe Reynes0d1bcdc2016-05-01 17:08:08 +0200196 return phy_ethtool_ksettings_set(phydev, cmd);
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400197}
198
Philippe Reynes0d1bcdc2016-05-01 17:08:08 +0200199static int gfar_get_ksettings(struct net_device *dev,
200 struct ethtool_link_ksettings *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200202 struct phy_device *phydev = dev->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200204 if (!phydev)
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400205 return -ENODEV;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400206
Philippe Reynes0d1bcdc2016-05-01 17:08:08 +0200207 return phy_ethtool_ksettings_get(phydev, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208}
209
/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	/* The whole memory-mapped register block is dumped by gfar_get_regs */
	return sizeof (struct gfar);
}
215
216/* Return a dump of the GFAR register space */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000217static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
218 void *regbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219{
220 int i;
221 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpet46ceb602009-11-02 07:03:34 +0000222 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223 u32 *buf = (u32 *) regbuf;
224
225 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
Kumar Galacc8c6e32006-02-01 15:18:03 -0600226 buf[i] = gfar_read(&theregs[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227}
228
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229/* Convert microseconds to ethernet clock ticks, which changes
230 * depending on what speed the controller is running at */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000231static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
232 unsigned int usecs)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200234 struct net_device *ndev = priv->ndev;
235 struct phy_device *phydev = ndev->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236 unsigned int count;
237
238 /* The timer is different, depending on the interface speed */
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200239 switch (phydev->speed) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400240 case SPEED_1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 count = GFAR_GBIT_TIME;
242 break;
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400243 case SPEED_100:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 count = GFAR_100_TIME;
245 break;
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400246 case SPEED_10:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247 default:
248 count = GFAR_10_TIME;
249 break;
250 }
251
252 /* Make sure we return a number greater than 0
253 * if usecs > 0 */
Eric Dumazet807540b2010-09-23 05:40:09 +0000254 return (usecs * 1000 + count - 1) / count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255}
256
257/* Convert ethernet clock ticks to microseconds */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000258static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
259 unsigned int ticks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260{
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200261 struct net_device *ndev = priv->ndev;
262 struct phy_device *phydev = ndev->phydev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 unsigned int count;
264
265 /* The timer is different, depending on the interface speed */
Philippe Reynes4c4a6b02016-05-16 01:30:08 +0200266 switch (phydev->speed) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400267 case SPEED_1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 count = GFAR_GBIT_TIME;
269 break;
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400270 case SPEED_100:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 count = GFAR_100_TIME;
272 break;
Andy Flemingbb40dcb2005-09-23 22:54:21 -0400273 case SPEED_10:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 default:
275 count = GFAR_10_TIME;
276 break;
277 }
278
279 /* Make sure we return a number greater than 0 */
280 /* if ticks is > 0 */
Eric Dumazet807540b2010-09-23 05:40:09 +0000281 return (ticks * count) / 1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282}
283
/* Get the coalescing parameters, and put them in the cvals
 * structure. */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* tick-to-usec conversion needs the PHY link speed */
	if (!dev->phydev)
		return -ENODEV;

	/* All queues share one setting; report queue 0's values */
	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	/* Adaptive coalescing is not implemented by this hardware */
	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate is (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}
347
/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i, err = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* usec-to-tick conversion needs the PHY link speed */
	if (!dev->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Serialize against other reconfiguration paths */
	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* Set up rx coalescing; a zero in either field disables it */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	/* The same frame/tick values are applied to every rx queue */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing; a zero in either field disables it */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	/* Restart (or just reset the MAC if down) to apply the settings */
	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
436
437/* Fills in rvals with the current ring parameters. Currently,
438 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
439 * jumbo are ignored by the driver */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000440static void gfar_gringparam(struct net_device *dev,
441 struct ethtool_ringparam *rvals)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442{
443 struct gfar_private *priv = netdev_priv(dev);
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000444 struct gfar_priv_tx_q *tx_queue = NULL;
445 struct gfar_priv_rx_q *rx_queue = NULL;
446
Sandeep Gopalpetfba4ed02009-11-02 07:03:15 +0000447 tx_queue = priv->tx_queue[0];
448 rx_queue = priv->rx_queue[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449
450 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
451 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
452 rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
453 rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
454
455 /* Values changeable by the user. The valid values are
456 * in the range 1 to the "*_max_pending" counterpart above.
457 */
Sandeep Gopalpeta12f8012009-11-02 07:03:00 +0000458 rvals->rx_pending = rx_queue->rx_ring_size;
459 rvals->rx_mini_pending = rx_queue->rx_ring_size;
460 rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
461 rvals->tx_pending = tx_queue->tx_ring_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462}
463
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	/* Validate before touching anything: sizes must be within the
	 * hardware maximum and a power of 2.
	 */
	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	/* Serialize against other reconfiguration paths */
	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
510
Claudiu Manoil23402bd2013-08-12 13:53:26 +0300511static void gfar_gpauseparam(struct net_device *dev,
512 struct ethtool_pauseparam *epause)
513{
514 struct gfar_private *priv = netdev_priv(dev);
515
516 epause->autoneg = !!priv->pause_aneg_en;
517 epause->rx_pause = !!priv->rx_pause_en;
518 epause->tx_pause = !!priv->tx_pause_en;
519}
520
/* Configure pause-frame (flow control) behavior.  Updates the PHY's
 * pause advertisement and, when autoneg is off, programs MACCFG1
 * directly.
 */
static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 oldadv, newadv;

	if (!phydev)
		return -ENODEV;

	/* Asymmetric rx/tx pause requires Asym_Pause PHY support */
	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	     (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	/* Map the requested rx/tx combination onto the 802.3 pause
	 * advertisement bits.
	 */
	priv->rx_pause_en = priv->tx_pause_en = 0;
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause) {
			priv->tx_pause_en = 1;
			/* FLOW_CTRL_RX & TX */
			newadv = ADVERTISED_Pause;
		} else  /* FLOW_CTLR_RX */
			newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
		/* FLOW_CTLR_TX */
		newadv = ADVERTISED_Asym_Pause;
	} else
		newadv = 0;

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	oldadv = phydev->advertising &
		(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	if (oldadv != newadv) {
		phydev->advertising &=
			~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg)
			/* inform link partner of our
			 * new flow ctrl settings
			 */
			return phy_start_aneg(phydev);

		if (!epause->autoneg) {
			/* Autoneg disabled: apply the settings to the
			 * MAC directly.
			 */
			u32 tempval;
			tempval = gfar_read(&regs->maccfg1);
			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);

			priv->tx_actual_en = 0;
			if (priv->tx_pause_en) {
				priv->tx_actual_en = 1;
				tempval |= MACCFG1_TX_FLOW;
			}

			if (priv->rx_pause_en)
				tempval |= MACCFG1_RX_FLOW;
			gfar_write(&regs->maccfg1, tempval);
		}
	}

	return 0;
}
590
/* Apply changed netdev features.  Only VLAN tag offload and RXCSUM
 * changes require action; they need a full restart (or MAC reset if
 * the interface is down) to take effect.
 */
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	/* Serialize against other reconfiguration paths */
	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
618
/* Return the driver's current message-level bitmap (ethtool msglvl) */
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}
625
/* Set the driver's message-level bitmap (ethtool msglvl) */
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}
632
Scott Woodd87eb122008-07-11 18:04:45 -0500633#ifdef CONFIG_PM
634static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
635{
636 struct gfar_private *priv = netdev_priv(dev);
637
Claudiu Manoil3e905b82015-10-05 17:19:59 +0300638 wol->supported = 0;
639 wol->wolopts = 0;
640
641 if (priv->wol_supported & GFAR_WOL_MAGIC)
642 wol->supported |= WAKE_MAGIC;
643
644 if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
645 wol->supported |= WAKE_UCAST;
646
647 if (priv->wol_opts & GFAR_WOL_MAGIC)
648 wol->wolopts |= WAKE_MAGIC;
649
650 if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
651 wol->wolopts |= WAKE_UCAST;
Scott Woodd87eb122008-07-11 18:04:45 -0500652}
653
/* Enable/disable Wake-on-LAN.  Only WAKE_MAGIC and WAKE_UCAST are
 * accepted; WAKE_MAGIC takes precedence over WAKE_UCAST when both
 * are requested.
 */
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	u16 wol_opts = 0;
	int err;

	if (!priv->wol_supported && wol->wolopts)
		return -EINVAL;

	/* Reject any mode other than magic packet / unicast match */
	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		wol_opts |= GFAR_WOL_MAGIC;
	} else {
		if (wol->wolopts & WAKE_UCAST)
			wol_opts |= GFAR_WOL_FILER_UCAST;
	}

	/* Silently drop modes this device cannot actually support */
	wol_opts &= priv->wol_supported;
	priv->wol_opts = 0;

	err = device_set_wakeup_enable(priv->dev, wol_opts);
	if (err)
		return err;

	/* Commit only after wakeup enable succeeded */
	priv->wol_opts = wol_opts;

	return 0;
}
#endif
Kumar Gala0bbaf062005-06-20 10:54:21 -0500685
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000686static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
687{
688 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
689
690 if (ethflow & RXH_L2DA) {
Claudiu Manoil5188f7e2015-10-23 11:41:58 +0300691 fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000692 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000693 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
694 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000695 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
696 priv->cur_filer_idx = priv->cur_filer_idx - 1;
697
Claudiu Manoil5188f7e2015-10-23 11:41:58 +0300698 fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000699 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000700 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
701 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000702 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
703 priv->cur_filer_idx = priv->cur_filer_idx - 1;
704 }
705
706 if (ethflow & RXH_VLAN) {
707 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000708 RQFCR_AND | RQFCR_HASHTBL_0;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000709 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000710 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
711 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000712 priv->cur_filer_idx = priv->cur_filer_idx - 1;
713 }
714
715 if (ethflow & RXH_IP_SRC) {
716 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000717 RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000718 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
719 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000720 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
721 priv->cur_filer_idx = priv->cur_filer_idx - 1;
722 }
723
724 if (ethflow & (RXH_IP_DST)) {
725 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000726 RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000727 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
728 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000729 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
730 priv->cur_filer_idx = priv->cur_filer_idx - 1;
731 }
732
733 if (ethflow & RXH_L3_PROTO) {
734 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000735 RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000736 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
737 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000738 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
739 priv->cur_filer_idx = priv->cur_filer_idx - 1;
740 }
741
742 if (ethflow & RXH_L4_B_0_1) {
743 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000744 RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000745 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
746 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000747 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
748 priv->cur_filer_idx = priv->cur_filer_idx - 1;
749 }
750
751 if (ethflow & RXH_L4_B_2_3) {
752 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000753 RQFCR_AND | RQFCR_HASHTBL_0;
Wu Jiajun-B063786c43e042011-06-07 21:46:51 +0000754 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
755 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000756 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
757 priv->cur_filer_idx = priv->cur_filer_idx - 1;
758 }
759}
760
/* Install hash rules for one flow class (e.g. TCP_V4_FLOW).
 *
 * The shadow filer table (priv->ftp_rqfcr/ftp_rqfpr) is scanned for the
 * parse rule that opens the cluster for @class.  The rules that follow it
 * are temporarily popped into local buffers, the hash rules for @ethflow
 * are inserted in their place, and the popped rules are written back
 * below them.
 *
 * Returns 1 on success and 0 on any failure (allocation failure,
 * unsupported class, or no matching parse rule) - note the non-errno
 * convention; the caller maps 0 to -EINVAL.
 */
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx; /* NOTE(review): written below but never read */
	unsigned int cmp_rqfpr;		/* parse-rule property bits to search for */
	unsigned int *local_rqfpr;	/* scratch: popped-out rule properties */
	unsigned int *local_rqfcr;	/* scratch: popped-out rule controls */
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;	/* j fills local buffers top-down */
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	/* Property bits identifying the parse rule of this flow class */
	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	/* Pop rules into the local buffers until the cluster-opening
	 * parse rule for this class is found.
	 */
	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, then it begins the starting of a cluster rule
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		/* cluster-closing entry (CLE set, AND clear): reset it to a
		 * plain cluster terminator and stop
		 */
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		/* skip AND-chained members of the old cluster; pop the rest */
		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}
864
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000865static int gfar_set_hash_opts(struct gfar_private *priv,
866 struct ethtool_rxnfc *cmd)
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000867{
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000868 /* write the filer rules here */
869 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
Ben Hutchingsbde35282011-04-08 13:45:11 +0000870 return -EINVAL;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +0000871
872 return 0;
873}
874
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700875static int gfar_check_filer_hardware(struct gfar_private *priv)
876{
Claudiu Manoil42851e82014-01-14 15:35:00 +0200877 struct gfar __iomem *regs = priv->gfargrp[0].regs;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700878 u32 i;
879
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700880 /* Check if we are in FIFO mode */
881 i = gfar_read(&regs->ecntrl);
882 i &= ECNTRL_FIFM;
883 if (i == ECNTRL_FIFM) {
884 netdev_notice(priv->ndev, "Interface in FIFO mode\n");
885 i = gfar_read(&regs->rctrl);
886 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
887 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
888 netdev_info(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000889 "Receive Queue Filtering enabled\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700890 } else {
891 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000892 "Receive Queue Filtering disabled\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700893 return -EOPNOTSUPP;
894 }
895 }
896 /* Or in standard mode */
897 else {
898 i = gfar_read(&regs->rctrl);
899 i &= RCTRL_PRSDEP_MASK;
900 if (i == RCTRL_PRSDEP_MASK) {
901 netdev_info(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000902 "Receive Queue Filtering enabled\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700903 } else {
904 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000905 "Receive Queue Filtering disabled\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700906 return -EOPNOTSUPP;
907 }
908 }
909
910 /* Sets the properties for arbitrary filer rule
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000911 * to the first 4 Layer 4 Bytes
912 */
Claudiu Manoil42851e82014-01-14 15:35:00 +0200913 gfar_write(&regs->rbifx, 0xC0C1C2C3);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700914 return 0;
915}
916
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700917/* Write a mask to filer cache */
918static void gfar_set_mask(u32 mask, struct filer_table *tab)
919{
920 tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
921 tab->fe[tab->index].prop = mask;
922 tab->index++;
923}
924
925/* Sets parse bits (e.g. IP or TCP) */
926static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
927{
928 gfar_set_mask(mask, tab);
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000929 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
930 RQFCR_AND;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700931 tab->fe[tab->index].prop = value;
932 tab->index++;
933}
934
935static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000936 struct filer_table *tab)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700937{
938 gfar_set_mask(mask, tab);
939 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
940 tab->fe[tab->index].prop = value;
941 tab->index++;
942}
943
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000944/* For setting a tuple of value and mask of type flag
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700945 * Example:
946 * IP-Src = 10.0.0.0/255.0.0.0
947 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
948 *
949 * Ethtool gives us a value=0 and mask=~0 for don't care a tuple
950 * For a don't care mask it gives us a 0
951 *
952 * The check if don't care and the mask adjustment if mask=0 is done for VLAN
953 * and MAC stuff on an upper level (due to missing information on this level).
954 * For these guys we can discard them if they are value=0 and mask=0.
955 *
956 * Further the all masks are one-padded for better hardware efficiency.
957 */
958static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +0000959 struct filer_table *tab)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700960{
961 switch (flag) {
Sebastian Poehn380b1532011-07-07 04:30:29 -0700962 /* 3bit */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -0700963 case RQFCR_PID_PRI:
964 if (!(value | mask))
965 return;
966 mask |= RQFCR_PID_PRI_MASK;
967 break;
968 /* 8bit */
969 case RQFCR_PID_L4P:
970 case RQFCR_PID_TOS:
971 if (!~(mask | RQFCR_PID_L4P_MASK))
972 return;
973 if (!mask)
974 mask = ~0;
975 else
976 mask |= RQFCR_PID_L4P_MASK;
977 break;
978 /* 12bit */
979 case RQFCR_PID_VID:
980 if (!(value | mask))
981 return;
982 mask |= RQFCR_PID_VID_MASK;
983 break;
984 /* 16bit */
985 case RQFCR_PID_DPT:
986 case RQFCR_PID_SPT:
987 case RQFCR_PID_ETY:
988 if (!~(mask | RQFCR_PID_PORT_MASK))
989 return;
990 if (!mask)
991 mask = ~0;
992 else
993 mask |= RQFCR_PID_PORT_MASK;
994 break;
995 /* 24bit */
996 case RQFCR_PID_DAH:
997 case RQFCR_PID_DAL:
998 case RQFCR_PID_SAH:
999 case RQFCR_PID_SAL:
1000 if (!(value | mask))
1001 return;
1002 mask |= RQFCR_PID_MAC_MASK;
1003 break;
1004 /* for all real 32bit masks */
1005 default:
1006 if (!~mask)
1007 return;
1008 if (!mask)
1009 mask = ~0;
1010 break;
1011 }
1012 gfar_set_general_attribute(value, mask, flag, tab);
1013}
1014
1015/* Translates value and mask for UDP, TCP or SCTP */
1016static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001017 struct ethtool_tcpip4_spec *mask,
1018 struct filer_table *tab)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001019{
Claudiu Manoil42851e82014-01-14 15:35:00 +02001020 gfar_set_attribute(be32_to_cpu(value->ip4src),
1021 be32_to_cpu(mask->ip4src),
1022 RQFCR_PID_SIA, tab);
1023 gfar_set_attribute(be32_to_cpu(value->ip4dst),
1024 be32_to_cpu(mask->ip4dst),
1025 RQFCR_PID_DIA, tab);
1026 gfar_set_attribute(be16_to_cpu(value->pdst),
1027 be16_to_cpu(mask->pdst),
1028 RQFCR_PID_DPT, tab);
1029 gfar_set_attribute(be16_to_cpu(value->psrc),
1030 be16_to_cpu(mask->psrc),
1031 RQFCR_PID_SPT, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001032 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
1033}
1034
1035/* Translates value and mask for RAW-IP4 */
1036static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001037 struct ethtool_usrip4_spec *mask,
1038 struct filer_table *tab)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001039{
Claudiu Manoil42851e82014-01-14 15:35:00 +02001040 gfar_set_attribute(be32_to_cpu(value->ip4src),
1041 be32_to_cpu(mask->ip4src),
1042 RQFCR_PID_SIA, tab);
1043 gfar_set_attribute(be32_to_cpu(value->ip4dst),
1044 be32_to_cpu(mask->ip4dst),
1045 RQFCR_PID_DIA, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001046 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
1047 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
Claudiu Manoil42851e82014-01-14 15:35:00 +02001048 gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
1049 be32_to_cpu(mask->l4_4_bytes),
1050 RQFCR_PID_ARB, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001051
1052}
1053
1054/* Translates value and mask for ETHER spec */
1055static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001056 struct filer_table *tab)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001057{
1058 u32 upper_temp_mask = 0;
1059 u32 lower_temp_mask = 0;
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001060
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001061 /* Source address */
1062 if (!is_broadcast_ether_addr(mask->h_source)) {
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001063 if (is_zero_ether_addr(mask->h_source)) {
1064 upper_temp_mask = 0xFFFFFFFF;
1065 lower_temp_mask = 0xFFFFFFFF;
1066 } else {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001067 upper_temp_mask = mask->h_source[0] << 16 |
1068 mask->h_source[1] << 8 |
1069 mask->h_source[2];
1070 lower_temp_mask = mask->h_source[3] << 16 |
1071 mask->h_source[4] << 8 |
1072 mask->h_source[5];
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001073 }
1074 /* Upper 24bit */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001075 gfar_set_attribute(value->h_source[0] << 16 |
1076 value->h_source[1] << 8 |
1077 value->h_source[2],
1078 upper_temp_mask, RQFCR_PID_SAH, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001079 /* And the same for the lower part */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001080 gfar_set_attribute(value->h_source[3] << 16 |
1081 value->h_source[4] << 8 |
1082 value->h_source[5],
1083 lower_temp_mask, RQFCR_PID_SAL, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001084 }
1085 /* Destination address */
1086 if (!is_broadcast_ether_addr(mask->h_dest)) {
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001087 /* Special for destination is limited broadcast */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001088 if ((is_broadcast_ether_addr(value->h_dest) &&
1089 is_zero_ether_addr(mask->h_dest))) {
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001090 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
1091 } else {
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001092 if (is_zero_ether_addr(mask->h_dest)) {
1093 upper_temp_mask = 0xFFFFFFFF;
1094 lower_temp_mask = 0xFFFFFFFF;
1095 } else {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001096 upper_temp_mask = mask->h_dest[0] << 16 |
1097 mask->h_dest[1] << 8 |
1098 mask->h_dest[2];
1099 lower_temp_mask = mask->h_dest[3] << 16 |
1100 mask->h_dest[4] << 8 |
1101 mask->h_dest[5];
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001102 }
1103
1104 /* Upper 24bit */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001105 gfar_set_attribute(value->h_dest[0] << 16 |
1106 value->h_dest[1] << 8 |
1107 value->h_dest[2],
1108 upper_temp_mask, RQFCR_PID_DAH, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001109 /* And the same for the lower part */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001110 gfar_set_attribute(value->h_dest[3] << 16 |
1111 value->h_dest[4] << 8 |
1112 value->h_dest[5],
1113 lower_temp_mask, RQFCR_PID_DAL, tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001114 }
1115 }
1116
Claudiu Manoil42851e82014-01-14 15:35:00 +02001117 gfar_set_attribute(be16_to_cpu(value->h_proto),
1118 be16_to_cpu(mask->h_proto),
1119 RQFCR_PID_ETY, tab);
1120}
1121
1122static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
1123{
1124 return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
1125}
1126
1127static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
1128{
1129 return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
1130}
1131
1132static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
1133{
1134 return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
1135}
1136
1137static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
1138{
1139 return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
1140}
1141
1142static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
1143{
1144 return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1145 VLAN_PRIO_SHIFT;
1146}
1147
1148static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
1149{
1150 return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1151 VLAN_PRIO_SHIFT;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001152}
1153
/* Convert a rule to binary filter format of gianfar.
 *
 * Appends the rule's filer entries to @tab.  Entry order matters: parse
 * bits first, then per-field attributes, then VLAN id/priority, and the
 * final entry carries the queue/drop action.  Masks in @rule are assumed
 * to be in gfar's inverted convention (see gfar_invert_masks()).
 *
 * Returns 0 on success, -1 for an unsupported flow type, -EBUSY when the
 * software cache is full.
 */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;	/* where this rule's entries begin */

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		/* all-zero mask means exact match on the whole TCI */
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		/* match CFI only when the mask selects it */
		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		/* SCTP has no parser bit; match IP protocol 132 instead */
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop
	 * (queue number lives at bit 10 of the control word)
	 */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}
1265
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001266/* Write the bit-pattern from software's buffer to hardware registers */
1267static int gfar_write_filer_table(struct gfar_private *priv,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001268 struct filer_table *tab)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001269{
1270 u32 i = 0;
1271 if (tab->index > MAX_FILER_IDX - 1)
1272 return -EBUSY;
1273
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001274 /* Fill regular entries */
Jakub Kicinskia898fe02015-08-12 02:41:55 +02001275 for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001276 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1277 /* Fill the rest with fall-troughs */
Jakub Kicinskia898fe02015-08-12 02:41:55 +02001278 for (; i < MAX_FILER_IDX; i++)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001279 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1280 /* Last entry must be default accept
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001281 * because that's what people expect
1282 */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001283 gfar_write_filer(priv, i, 0x20, 0x0);
1284
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001285 return 0;
1286}
1287
1288static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001289 struct gfar_private *priv)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001290{
1291
1292 if (flow->flow_type & FLOW_EXT) {
1293 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1294 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001295 "User-specific data not supported!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001296 if (~flow->m_ext.vlan_etype)
1297 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001298 "VLAN-etype not supported!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001299 }
1300 if (flow->flow_type == IP_USER_FLOW)
1301 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1302 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001303 "IP-Version differing from IPv4 not supported!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001304
1305 return 0;
1306}
1307
1308static int gfar_process_filer_changes(struct gfar_private *priv)
1309{
1310 struct ethtool_flow_spec_container *j;
1311 struct filer_table *tab;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001312 s32 ret = 0;
1313
1314 /* So index is set to zero, too! */
1315 tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1316 if (tab == NULL)
1317 return -ENOMEM;
1318
1319 /* Now convert the existing filer data from flow_spec into
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001320 * filer tables binary format
1321 */
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001322 list_for_each_entry(j, &priv->rx_list.list, list) {
1323 ret = gfar_convert_to_filer(&j->fs, tab);
1324 if (ret == -EBUSY) {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001325 netdev_err(priv->ndev,
1326 "Rule not added: No free space!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001327 goto end;
1328 }
1329 if (ret == -1) {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001330 netdev_err(priv->ndev,
1331 "Rule not added: Unsupported Flow-type!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001332 goto end;
1333 }
1334 }
1335
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001336 /* Write everything to hardware */
1337 ret = gfar_write_filer_table(priv, tab);
1338 if (ret == -EBUSY) {
1339 netdev_err(priv->ndev, "Rule not added: No free space!\n");
1340 goto end;
1341 }
1342
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001343end:
1344 kfree(tab);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001345 return ret;
1346}
1347
1348static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1349{
1350 u32 i = 0;
1351
1352 for (i = 0; i < sizeof(flow->m_u); i++)
1353 flow->m_u.hdata[i] ^= 0xFF;
1354
Claudiu Manoil42851e82014-01-14 15:35:00 +02001355 flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
1356 flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
1357 flow->m_ext.data[0] ^= cpu_to_be32(~0);
1358 flow->m_ext.data[1] ^= cpu_to_be32(~0);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001359}
1360
/* ETHTOOL_SRXCLSRLINS: store a new classification rule in rx_list
 * (kept sorted ascending by @location) and reprogram the hardware.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY when the
 * location is taken or the filer is full, other negative errno from the
 * hardware/capability checks.
 */
static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	/* store masks in the driver's inverted convention */
	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		/* first rule: verify the hardware supports filtering */
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			/* list_add_tail() on a member inserts before it,
			 * keeping the list sorted by location
			 */
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	priv->rx_list.count++;
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	return ret;

clean_list:
	/* undo the insertion so the list matches the hardware state */
	priv->rx_list.count--;
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}
1414
1415static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1416{
1417 struct ethtool_flow_spec_container *comp;
1418 u32 ret = -EINVAL;
1419
1420 if (list_empty(&priv->rx_list.list))
1421 return ret;
1422
1423 list_for_each_entry(comp, &priv->rx_list.list, list) {
1424 if (comp->fs.location == loc) {
1425 list_del(&comp->list);
1426 kfree(comp);
1427 priv->rx_list.count--;
1428 gfar_process_filer_changes(priv);
1429 ret = 0;
1430 break;
1431 }
1432 }
1433
1434 return ret;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001435}
1436
1437static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1438{
1439 struct ethtool_flow_spec_container *comp;
1440 u32 ret = -EINVAL;
1441
1442 list_for_each_entry(comp, &priv->rx_list.list, list) {
1443 if (comp->fs.location == cmd->fs.location) {
1444 memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
1445 gfar_invert_masks(&cmd->fs);
1446 ret = 0;
1447 break;
1448 }
1449 }
1450
1451 return ret;
1452}
1453
1454static int gfar_get_cls_all(struct gfar_private *priv,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001455 struct ethtool_rxnfc *cmd, u32 *rule_locs)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001456{
1457 struct ethtool_flow_spec_container *comp;
1458 u32 i = 0;
1459
1460 list_for_each_entry(comp, &priv->rx_list.list, list) {
David S. Miller8decf862011-09-22 03:23:13 -04001461 if (i == cmd->rule_cnt)
1462 return -EMSGSIZE;
1463 rule_locs[i] = comp->fs.location;
1464 i++;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001465 }
1466
1467 cmd->data = MAX_FILER_IDX;
Ben Hutchings473e64e2011-09-06 13:52:47 +00001468 cmd->rule_cnt = i;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001469
1470 return 0;
1471}
1472
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001473static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1474{
1475 struct gfar_private *priv = netdev_priv(dev);
1476 int ret = 0;
1477
Claudiu Manoil08511332014-02-24 12:13:45 +02001478 if (test_bit(GFAR_RESETTING, &priv->state))
1479 return -EBUSY;
1480
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001481 mutex_lock(&priv->rx_queue_access);
1482
1483 switch (cmd->cmd) {
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001484 case ETHTOOL_SRXFH:
1485 ret = gfar_set_hash_opts(priv, cmd);
1486 break;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001487 case ETHTOOL_SRXCLSRLINS:
Ben Hutchings3a73e492012-01-03 11:59:30 +00001488 if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
1489 cmd->fs.ring_cookie >= priv->num_rx_queues) ||
1490 cmd->fs.location >= MAX_FILER_IDX) {
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001491 ret = -EINVAL;
1492 break;
1493 }
1494 ret = gfar_add_cls(priv, &cmd->fs);
1495 break;
1496 case ETHTOOL_SRXCLSRLDEL:
1497 ret = gfar_del_cls(priv, cmd->fs.location);
1498 break;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001499 default:
1500 ret = -EINVAL;
1501 }
1502
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001503 mutex_unlock(&priv->rx_queue_access);
1504
1505 return ret;
1506}
1507
1508static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001509 u32 *rule_locs)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001510{
1511 struct gfar_private *priv = netdev_priv(dev);
1512 int ret = 0;
1513
1514 switch (cmd->cmd) {
1515 case ETHTOOL_GRXRINGS:
1516 cmd->data = priv->num_rx_queues;
1517 break;
1518 case ETHTOOL_GRXCLSRLCNT:
1519 cmd->rule_cnt = priv->rx_list.count;
1520 break;
1521 case ETHTOOL_GRXCLSRULE:
1522 ret = gfar_get_cls(priv, cmd);
1523 break;
1524 case ETHTOOL_GRXCLSRLALL:
Ben Hutchings815c7db2011-09-06 13:49:12 +00001525 ret = gfar_get_cls_all(priv, cmd, rule_locs);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001526 break;
1527 default:
1528 ret = -EINVAL;
1529 break;
1530 }
1531
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001532 return ret;
1533}
1534
/* Index of the PTP hardware clock associated with this device; -1 while
 * no clock is registered.  Exported so another module (presumably the
 * gianfar PTP driver — not visible in this file, confirm) can set it.
 */
int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);
Richard Cochran66636282012-04-03 22:59:19 +00001537
1538static int gfar_get_ts_info(struct net_device *dev,
1539 struct ethtool_ts_info *info)
1540{
1541 struct gfar_private *priv = netdev_priv(dev);
1542
1543 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001544 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1545 SOF_TIMESTAMPING_SOFTWARE;
Richard Cochran66636282012-04-03 22:59:19 +00001546 info->phc_index = -1;
1547 return 0;
1548 }
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001549 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1550 SOF_TIMESTAMPING_RX_HARDWARE |
1551 SOF_TIMESTAMPING_RAW_HARDWARE;
Richard Cochran66636282012-04-03 22:59:19 +00001552 info->phc_index = gfar_phc_index;
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001553 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1554 (1 << HWTSTAMP_TX_ON);
1555 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1556 (1 << HWTSTAMP_FILTER_ALL);
Richard Cochran66636282012-04-03 22:59:19 +00001557 return 0;
1558}
1559
Jeff Garzik7282d492006-09-13 14:30:00 -04001560const struct ethtool_ops gfar_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 .get_drvinfo = gfar_gdrvinfo,
1562 .get_regs_len = gfar_reglen,
1563 .get_regs = gfar_get_regs,
1564 .get_link = ethtool_op_get_link,
1565 .get_coalesce = gfar_gcoalesce,
1566 .set_coalesce = gfar_scoalesce,
1567 .get_ringparam = gfar_gringparam,
1568 .set_ringparam = gfar_sringparam,
Claudiu Manoil23402bd2013-08-12 13:53:26 +03001569 .get_pauseparam = gfar_gpauseparam,
1570 .set_pauseparam = gfar_spauseparam,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 .get_strings = gfar_gstrings,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07001572 .get_sset_count = gfar_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 .get_ethtool_stats = gfar_fill_stats,
Kumar Gala0bbaf062005-06-20 10:54:21 -05001574 .get_msglevel = gfar_get_msglevel,
1575 .set_msglevel = gfar_set_msglevel,
Scott Woodd87eb122008-07-11 18:04:45 -05001576#ifdef CONFIG_PM
1577 .get_wol = gfar_get_wol,
1578 .set_wol = gfar_set_wol,
1579#endif
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001580 .set_rxnfc = gfar_set_nfc,
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001581 .get_rxnfc = gfar_get_nfc,
Richard Cochran66636282012-04-03 22:59:19 +00001582 .get_ts_info = gfar_get_ts_info,
Philippe Reynes0d1bcdc2016-05-01 17:08:08 +02001583 .get_link_ksettings = gfar_get_ksettings,
1584 .set_link_ksettings = gfar_set_ksettings,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585};