/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};
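
/* Each of these tables maps the common register enum used throughout this
 * driver to that register's byte offset on one hardware generation; the
 * sh_eth_read()/sh_eth_write() accessors (defined in sh_eth.h) look the
 * offset up via mdp->reg_offset before touching the ioremapped window,
 * roughly like this (illustrative sketch, not the literal accessor):
 *
 *	iowrite32(data, mdp->addr + mdp->reg_offset[reg]);
 *
 * so supporting a new SoC variant mostly means adding another table here.
 */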

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up; defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rmiimode	= 1,
	.shift_rd0	= 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= RMCR_RNC,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= RMCR_RNC,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.hw_crc		= 1,
	.select_mii	= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,
	.rmcr_value	= RMCR_RNC,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.shift_rd0	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}
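
/* Note: any field left zero in the per-SoC sh_eth_cpu_data descriptors above
 * falls back to a DEFAULT_* value here, which is what lets those descriptors
 * stay sparse.  (The caller presumably runs this once per device before the
 * chip is first programmed; the call site is outside this excerpt.)
 */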

static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
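
/* The loop above gives the EDMAC roughly 100 ms (100 iterations of
 * mdelay(1)) to clear the software-reset bits in EDMR (the 0x3 mask)
 * before reporting -ETIMEDOUT.
 */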

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

out:
	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
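
/* Illustrative arithmetic for the SH4 variant above, assuming
 * SH4_SKB_RX_ALIGN is 32 (one cache line): if skb->data ends in 0x22,
 * reserve = 32 - (0x22 & 31) = 30, and skb_reserve() advances the data
 * pointer to the next 32-byte (0x40) boundary.  The SH2/SH3 variant
 * simply skips a fixed SH2_SH3_SKB_RX_ALIGN bytes.
 */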

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
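
/* Every descriptor field crosses this endianness boundary: the ring setup
 * code below writes fields through cpu_to_edmac(), e.g.
 *
 *	txdesc->status = cpu_to_edmac(mdp, TD_TFP);
 *
 * and status reads come back through edmac_to_cpu(), so none of the ring
 * handling depends on which endianness the EDMAC was configured for.
 */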

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). To use this device, the MAC address must be
 * set in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
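
/* These callbacks plug into the generic mdio-bitbang framework
 * (drivers/net/phy/mdio-bitbang.c), which shifts MDIO management frames
 * out bit by bit: it toggles the clock via set_mdc(), flips the data-pin
 * direction with set_mdio_dir(), and moves data bits through
 * set_mdio_data()/get_mdio_data().  The optional set_gate() hook runs
 * before each pin access on boards that first need a gate register poked
 * (board-specific; supplied through platform data).
 */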

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is aligned to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
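
	/* Worked example: with the default 1500-byte MTU (> 1492, so the
	 * formula applies), ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 =
	 * 1546 bytes, plus NET_IP_ALIGN more when RPADIR padding is in use.
	 */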

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}
1248
1249/* free Tx skb function */
1250static int sh_eth_txfree(struct net_device *ndev)
1251{
1252 struct sh_eth_private *mdp = netdev_priv(ndev);
1253 struct sh_eth_txdesc *txdesc;
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001254 int free_num = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001255 int entry = 0;
1256
1257 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001258 entry = mdp->dirty_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001259 txdesc = &mdp->tx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001260 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001261 break;
1262 /* Free the original skb. */
1263 if (mdp->tx_skbuff[entry]) {
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00001264 dma_unmap_single(&ndev->dev, txdesc->addr,
1265 txdesc->buffer_length, DMA_TO_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001266 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1267 mdp->tx_skbuff[entry] = NULL;
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001268 free_num++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001269 }
Yoshinori Sato71557a32008-08-06 19:49:00 -04001270 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001271 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04001272 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001273
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001274 ndev->stats.tx_packets++;
1275 ndev->stats.tx_bytes += txdesc->buffer_length;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001276 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001277 return free_num;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001278}
1279
1280/* Packet receive function */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001281static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001282{
1283 struct sh_eth_private *mdp = netdev_priv(ndev);
1284 struct sh_eth_rxdesc *rxdesc;
1285
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001286 int entry = mdp->cur_rx % mdp->num_rx_ring;
1287 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001288 struct sk_buff *skb;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001289 int exceeded = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001290 u16 pkt_len = 0;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001291 u32 desc_status;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001292
1293 rxdesc = &mdp->rx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001294 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1295 desc_status = edmac_to_cpu(mdp, rxdesc->status);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001296 pkt_len = rxdesc->frame_length;
1297
1298 if (--boguscnt < 0)
1299 break;
1300
Sergei Shtylyov37191092013-06-19 23:30:23 +04001301 if (*quota <= 0) {
1302 exceeded = 1;
1303 break;
1304 }
1305 (*quota)--;
1306
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001307 if (!(desc_status & RDFEND))
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001308 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001309
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001310 /* In case of almost all GETHER/ETHERs, the Receive Frame State
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001311 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1312 * bit 0. However, in case of the R8A7740's GETHER, the RFS
1313 * bits are from bit 25 to bit 16. So, the driver needs right
1314 * shifting by 16.
1315 */
Sergei Shtylyovac8025a2013-06-13 22:12:45 +04001316 if (mdp->cd->shift_rd0)
1317 desc_status >>= 16;
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001318
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001319 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1320 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001321 ndev->stats.rx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001322 if (desc_status & RD_RFS1)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001323 ndev->stats.rx_crc_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001324 if (desc_status & RD_RFS2)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001325 ndev->stats.rx_frame_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001326 if (desc_status & RD_RFS3)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001327 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001328 if (desc_status & RD_RFS4)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001329 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001330 if (desc_status & RD_RFS6)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001331 ndev->stats.rx_missed_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001332 if (desc_status & RD_RFS10)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001333 ndev->stats.rx_over_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001334 } else {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001335 if (!mdp->cd->hw_swap)
1336 sh_eth_soft_swap(
1337 phys_to_virt(ALIGN(rxdesc->addr, 4)),
1338 pkt_len + 2);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001339 skb = mdp->rx_skbuff[entry];
1340 mdp->rx_skbuff[entry] = NULL;
Magnus Damm503914c2009-12-15 21:16:55 -08001341 if (mdp->cd->rpadir)
1342 skb_reserve(skb, NET_IP_ALIGN);
Kouei Abe7db8e0c2013-08-30 12:41:07 +09001343 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1344 mdp->rx_buf_sz,
1345 DMA_FROM_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001346 skb_put(skb, pkt_len);
1347 skb->protocol = eth_type_trans(skb, ndev);
Sergei Shtylyova8e9fd02013-09-03 03:03:10 +04001348 netif_receive_skb(skb);
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001349 ndev->stats.rx_packets++;
1350 ndev->stats.rx_bytes += pkt_len;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001351 }
Yoshinori Sato71557a32008-08-06 19:49:00 -04001352 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001353 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
Yoshihiro Shimoda862df492009-05-24 23:53:40 +00001354 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001355 }
1356
1357 /* Refill the Rx ring buffers. */
1358 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001359 entry = mdp->dirty_rx % mdp->num_rx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001360 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001361		/* The buffer size is aligned to a 16-byte boundary. */
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001362 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001363
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001364 if (mdp->rx_skbuff[entry] == NULL) {
Pradeep A. Dalvidae2e9f2012-02-06 11:16:13 +00001365 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001366 mdp->rx_skbuff[entry] = skb;
1367 if (skb == NULL)
1368 break; /* Better luck next round. */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001369 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001370 DMA_FROM_DEVICE);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001371 sh_eth_set_receive_align(skb);
1372
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001373 skb_checksum_none_assert(skb);
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001374 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001375 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001376 if (entry >= mdp->num_rx_ring - 1)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001377 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001378 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001379 else
1380 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001381 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001382 }
1383
1384 /* Restart Rx engine if stopped. */
1385 /* If we don't need to check status, don't. -KDU */
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001386 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
Yoshihiro Shimodaa18e08b2012-06-20 15:26:34 +00001387		/* fix the ring pointers for the next reception if RDE is set */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001388 if (intr_status & EESR_RDE) {
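			/* Each RX descriptor is 16 bytes, so the byte distance
			 * from RDLAR (the ring base) to RDFAR (the next fetch
			 * address) shifted right by 4 is the index of the
			 * descriptor the controller stopped at; cur_rx and
			 * dirty_rx are resynchronized to it below.
			 */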
1389 u32 count = (sh_eth_read(ndev, RDFAR) -
1390 sh_eth_read(ndev, RDLAR)) >> 4;
1391
1392 mdp->cur_rx = count;
1393 mdp->dirty_rx = count;
1394 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001395 sh_eth_write(ndev, EDRRR_R, EDRRR);
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001396 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001397
Sergei Shtylyov37191092013-06-19 23:30:23 +04001398 return exceeded;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001399}
1400
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001401static void sh_eth_rcv_snd_disable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001402{
1403 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001404 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1405 ~(ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001406}
1407
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001408static void sh_eth_rcv_snd_enable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001409{
1410 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001411 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1412 (ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001413}
1414
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001415/* error control function */
1416static void sh_eth_error(struct net_device *ndev, int intr_status)
1417{
1418 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001419 u32 felic_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001420 u32 link_stat;
1421 u32 mask;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001422
1423 if (intr_status & EESR_ECI) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001424 felic_stat = sh_eth_read(ndev, ECSR);
1425 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001426 if (felic_stat & ECSR_ICD)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001427 ndev->stats.tx_carrier_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001428 if (felic_stat & ECSR_LCHNG) {
1429 /* Link Changed */
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001430 if (mdp->cd->no_psr || mdp->no_ether_link) {
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001431 goto ignore_link;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001432 } else {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001433 link_stat = (sh_eth_read(ndev, PSR));
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001434 if (mdp->ether_link_active_low)
1435 link_stat = ~link_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001436 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001437 if (!(link_stat & PHY_ST_LINK)) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001438 sh_eth_rcv_snd_disable(ndev);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001439 } else {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001440 /* Link Up */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001441 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001442 ~DMAC_M_ECI, EESIPR);
1443 /* clear int */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001444 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001445 ECSR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001446 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001447 DMAC_M_ECI, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001448 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001449 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001450 }
1451 }
1452 }
1453
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001454ignore_link:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001455 if (intr_status & EESR_TWB) {
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001456 /* Unused write back interrupt */
1457 if (intr_status & EESR_TABT) { /* Transmit Abort int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001458 ndev->stats.tx_aborted_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001459 if (netif_msg_tx_err(mdp))
1460 dev_err(&ndev->dev, "Transmit Abort\n");
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001461 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001462 }
1463
1464 if (intr_status & EESR_RABT) {
1465 /* Receive Abort int */
1466 if (intr_status & EESR_RFRMER) {
1467 /* Receive Frame Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001468 ndev->stats.rx_frame_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001469 if (netif_msg_rx_err(mdp))
1470 dev_err(&ndev->dev, "Receive Abort\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001471 }
1472 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001473
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001474 if (intr_status & EESR_TDE) {
1475 /* Transmit Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001476 ndev->stats.tx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001477 if (netif_msg_tx_err(mdp))
1478 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1479 }
1480
1481 if (intr_status & EESR_TFE) {
1482 /* FIFO under flow */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001483 ndev->stats.tx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001484 if (netif_msg_tx_err(mdp))
1485 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001486 }
1487
1488 if (intr_status & EESR_RDE) {
1489 /* Receive Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001490 ndev->stats.rx_over_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001491
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001492 if (netif_msg_rx_err(mdp))
1493 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001494 }
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001495
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001496 if (intr_status & EESR_RFE) {
1497 /* Receive FIFO Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001498 ndev->stats.rx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001499 if (netif_msg_rx_err(mdp))
1500 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1501 }
1502
1503 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1504 /* Address Error */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001505 ndev->stats.tx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001506 if (netif_msg_tx_err(mdp))
1507 dev_err(&ndev->dev, "Address Error\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001508 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001509
1510 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1511 if (mdp->cd->no_ade)
1512 mask &= ~EESR_ADE;
1513 if (intr_status & mask) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001514 /* Tx error */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001515 u32 edtrr = sh_eth_read(ndev, EDTRR);
Sergei Shtylyov090d5602014-01-11 02:41:49 +03001516
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001517 /* dmesg */
Sergei Shtylyov090d5602014-01-11 02:41:49 +03001518 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1519 intr_status, mdp->cur_tx, mdp->dirty_tx,
1520 (u32)ndev->state, edtrr);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001521 /* dirty buffer free */
1522 sh_eth_txfree(ndev);
1523
1524 /* SH7712 BUG */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001525 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001526 /* tx dma start */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001527 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001528 }
1529 /* wakeup */
1530 netif_wake_queue(ndev);
1531 }
1532}
1533
1534static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1535{
1536 struct net_device *ndev = netdev;
1537 struct sh_eth_private *mdp = netdev_priv(ndev);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001538 struct sh_eth_cpu_data *cd = mdp->cd;
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001539 irqreturn_t ret = IRQ_NONE;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001540 unsigned long intr_status, intr_enable;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001541
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001542 spin_lock(&mdp->lock);
1543
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001544 /* Get interrupt status */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001545 intr_status = sh_eth_read(ndev, EESR);
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001546 /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1547	 * enabled since it's the one that comes through regardless of the mask,
1548 * and we need to fully handle it in sh_eth_error() in order to quench
1549 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1550 */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001551 intr_enable = sh_eth_read(ndev, EESIPR);
1552 intr_status &= intr_enable | DMAC_M_ECI;
1553 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001554 ret = IRQ_HANDLED;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001555 else
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001556 goto other_irq;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001557
Sergei Shtylyov37191092013-06-19 23:30:23 +04001558 if (intr_status & EESR_RX_CHECK) {
1559 if (napi_schedule_prep(&mdp->napi)) {
1560 /* Mask Rx interrupts */
1561 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1562 EESIPR);
1563 __napi_schedule(&mdp->napi);
1564 } else {
1565 dev_warn(&ndev->dev,
1566 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1567 intr_status, intr_enable);
1568 }
1569 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001570
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001571 /* Tx Check */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001572 if (intr_status & cd->tx_check) {
Sergei Shtylyov37191092013-06-19 23:30:23 +04001573 /* Clear Tx interrupts */
1574 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1575
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001576 sh_eth_txfree(ndev);
1577 netif_wake_queue(ndev);
1578 }
1579
Sergei Shtylyov37191092013-06-19 23:30:23 +04001580 if (intr_status & cd->eesr_err_check) {
1581 /* Clear error interrupts */
1582 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1583
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001584 sh_eth_error(ndev, intr_status);
Sergei Shtylyov37191092013-06-19 23:30:23 +04001585 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001586
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001587other_irq:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001588 spin_unlock(&mdp->lock);
1589
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001590 return ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001591}
1592
Sergei Shtylyov37191092013-06-19 23:30:23 +04001593static int sh_eth_poll(struct napi_struct *napi, int budget)
1594{
1595 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1596 napi);
1597 struct net_device *ndev = napi->dev;
1598 int quota = budget;
1599 unsigned long intr_status;
1600
1601 for (;;) {
1602 intr_status = sh_eth_read(ndev, EESR);
1603 if (!(intr_status & EESR_RX_CHECK))
1604 break;
1605 /* Clear Rx interrupts */
1606 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1607
1608 if (sh_eth_rx(ndev, intr_status, &quota))
1609 goto out;
1610 }
1611
1612 napi_complete(napi);
1613
1614 /* Reenable Rx interrupts */
1615 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1616out:
1617 return budget - quota;
1618}
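/* Note on the NAPI contract (generic kernel convention, not specific to
 * this driver): sh_eth_poll() returns the number of packets it consumed,
 * i.e. budget - quota, and must only call napi_complete() and re-enable
 * RX interrupts when it stopped because no work was left, never because
 * the budget ran out.
 */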
1619
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001620/* PHY state control function */
1621static void sh_eth_adjust_link(struct net_device *ndev)
1622{
1623 struct sh_eth_private *mdp = netdev_priv(ndev);
1624 struct phy_device *phydev = mdp->phydev;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001625 int new_state = 0;
1626
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001627 if (phydev->link) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001628 if (phydev->duplex != mdp->duplex) {
1629 new_state = 1;
1630 mdp->duplex = phydev->duplex;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001631 if (mdp->cd->set_duplex)
1632 mdp->cd->set_duplex(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001633 }
1634
1635 if (phydev->speed != mdp->speed) {
1636 new_state = 1;
1637 mdp->speed = phydev->speed;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001638 if (mdp->cd->set_rate)
1639 mdp->cd->set_rate(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001640 }
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001641 if (!mdp->link) {
Yoshihiro Shimoda91a56152011-07-05 20:33:51 +00001642 sh_eth_write(ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001643 sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1644 ECMR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001645 new_state = 1;
1646 mdp->link = phydev->link;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001647 if (mdp->cd->no_psr || mdp->no_ether_link)
1648 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001649 }
1650 } else if (mdp->link) {
1651 new_state = 1;
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001652 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001653 mdp->speed = 0;
1654 mdp->duplex = -1;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001655 if (mdp->cd->no_psr || mdp->no_ether_link)
1656 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001657 }
1658
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001659 if (new_state && netif_msg_link(mdp))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001660 phy_print_status(phydev);
1661}
1662
1663/* PHY init function */
1664static int sh_eth_phy_init(struct net_device *ndev)
1665{
1666 struct sh_eth_private *mdp = netdev_priv(ndev);
David S. Miller0a372eb2009-05-26 21:11:09 -07001667 char phy_id[MII_BUS_ID_SIZE + 3];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001668 struct phy_device *phydev = NULL;
1669
Kay Sieversfb28ad352008-11-10 13:55:14 -08001670 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001671 mdp->mii_bus->id, mdp->phy_id);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001672
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001673 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001674 mdp->speed = 0;
1675 mdp->duplex = -1;
1676
1677 /* Try connect to PHY */
Joe Perchesc061b182010-08-23 18:20:03 +00001678 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
Florian Fainellif9a8f832013-01-14 00:52:52 +00001679 mdp->phy_interface);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001680 if (IS_ERR(phydev)) {
1681 dev_err(&ndev->dev, "phy_connect failed\n");
1682 return PTR_ERR(phydev);
1683 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001684
Sergei Shtylyov18be0992013-12-20 01:39:52 +03001685 dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
1686 phydev->addr, phydev->irq, phydev->drv->name);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001687
1688 mdp->phydev = phydev;
1689
1690 return 0;
1691}
1692
1693/* PHY control start function */
1694static int sh_eth_phy_start(struct net_device *ndev)
1695{
1696 struct sh_eth_private *mdp = netdev_priv(ndev);
1697 int ret;
1698
1699 ret = sh_eth_phy_init(ndev);
1700 if (ret)
1701 return ret;
1702
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001703 phy_start(mdp->phydev);
1704
1705 return 0;
1706}
1707
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001708static int sh_eth_get_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001709 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001710{
1711 struct sh_eth_private *mdp = netdev_priv(ndev);
1712 unsigned long flags;
1713 int ret;
1714
1715 spin_lock_irqsave(&mdp->lock, flags);
1716 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1717 spin_unlock_irqrestore(&mdp->lock, flags);
1718
1719 return ret;
1720}
1721
1722static int sh_eth_set_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001723 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001724{
1725 struct sh_eth_private *mdp = netdev_priv(ndev);
1726 unsigned long flags;
1727 int ret;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001728
1729 spin_lock_irqsave(&mdp->lock, flags);
1730
1731 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001732 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001733
1734 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1735 if (ret)
1736 goto error_exit;
1737
1738 if (ecmd->duplex == DUPLEX_FULL)
1739 mdp->duplex = 1;
1740 else
1741 mdp->duplex = 0;
1742
1743 if (mdp->cd->set_duplex)
1744 mdp->cd->set_duplex(ndev);
1745
1746error_exit:
1747 mdelay(1);
1748
1749 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001750 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001751
1752 spin_unlock_irqrestore(&mdp->lock, flags);
1753
1754 return ret;
1755}
1756
1757static int sh_eth_nway_reset(struct net_device *ndev)
1758{
1759 struct sh_eth_private *mdp = netdev_priv(ndev);
1760 unsigned long flags;
1761 int ret;
1762
1763 spin_lock_irqsave(&mdp->lock, flags);
1764 ret = phy_start_aneg(mdp->phydev);
1765 spin_unlock_irqrestore(&mdp->lock, flags);
1766
1767 return ret;
1768}
1769
1770static u32 sh_eth_get_msglevel(struct net_device *ndev)
1771{
1772 struct sh_eth_private *mdp = netdev_priv(ndev);
1773 return mdp->msg_enable;
1774}
1775
1776static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1777{
1778 struct sh_eth_private *mdp = netdev_priv(ndev);
1779 mdp->msg_enable = value;
1780}
1781
1782static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1783 "rx_current", "tx_current",
1784 "rx_dirty", "tx_dirty",
1785};
1786#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1787
1788static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1789{
1790 switch (sset) {
1791 case ETH_SS_STATS:
1792 return SH_ETH_STATS_LEN;
1793 default:
1794 return -EOPNOTSUPP;
1795 }
1796}
1797
1798static void sh_eth_get_ethtool_stats(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001799 struct ethtool_stats *stats, u64 *data)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001800{
1801 struct sh_eth_private *mdp = netdev_priv(ndev);
1802 int i = 0;
1803
1804 /* device-specific stats */
1805 data[i++] = mdp->cur_rx;
1806 data[i++] = mdp->cur_tx;
1807 data[i++] = mdp->dirty_rx;
1808 data[i++] = mdp->dirty_tx;
1809}
1810
1811static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1812{
1813 switch (stringset) {
1814 case ETH_SS_STATS:
1815 memcpy(data, *sh_eth_gstrings_stats,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001816 sizeof(sh_eth_gstrings_stats));
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001817 break;
1818 }
1819}
1820
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001821static void sh_eth_get_ringparam(struct net_device *ndev,
1822 struct ethtool_ringparam *ring)
1823{
1824 struct sh_eth_private *mdp = netdev_priv(ndev);
1825
1826 ring->rx_max_pending = RX_RING_MAX;
1827 ring->tx_max_pending = TX_RING_MAX;
1828 ring->rx_pending = mdp->num_rx_ring;
1829 ring->tx_pending = mdp->num_tx_ring;
1830}
1831
1832static int sh_eth_set_ringparam(struct net_device *ndev,
1833 struct ethtool_ringparam *ring)
1834{
1835 struct sh_eth_private *mdp = netdev_priv(ndev);
1836 int ret;
1837
1838 if (ring->tx_pending > TX_RING_MAX ||
1839 ring->rx_pending > RX_RING_MAX ||
1840 ring->tx_pending < TX_RING_MIN ||
1841 ring->rx_pending < RX_RING_MIN)
1842 return -EINVAL;
1843 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1844 return -EINVAL;
1845
1846 if (netif_running(ndev)) {
1847 netif_tx_disable(ndev);
1848 /* Disable interrupts by clearing the interrupt mask. */
1849 sh_eth_write(ndev, 0x0000, EESIPR);
1850 /* Stop the chip's Tx and Rx processes. */
1851 sh_eth_write(ndev, 0, EDTRR);
1852 sh_eth_write(ndev, 0, EDRRR);
1853 synchronize_irq(ndev->irq);
1854 }
1855
1856 /* Free all the skbuffs in the Rx queue. */
1857 sh_eth_ring_free(ndev);
1858 /* Free DMA buffer */
1859 sh_eth_free_dma_buffer(mdp);
1860
1861 /* Set new parameters */
1862 mdp->num_rx_ring = ring->rx_pending;
1863 mdp->num_tx_ring = ring->tx_pending;
1864
1865 ret = sh_eth_ring_init(ndev);
1866 if (ret < 0) {
1867 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1868 return ret;
1869 }
1870 ret = sh_eth_dev_init(ndev, false);
1871 if (ret < 0) {
1872 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1873 return ret;
1874 }
1875
1876 if (netif_running(ndev)) {
1877 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1878 /* Setting the Rx mode will start the Rx process. */
1879 sh_eth_write(ndev, EDRRR_R, EDRRR);
1880 netif_wake_queue(ndev);
1881 }
1882
1883 return 0;
1884}
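/* Usage note: this resize path is driven by the standard ethtool ring
 * interface, e.g. (interface name illustrative):
 *
 *	ethtool -G eth0 rx 128 tx 128
 *
 * The hardware is quiesced and all buffers are freed and reallocated,
 * so packets in flight during the resize are dropped by design.
 */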
1885
stephen hemminger9b07be42012-01-04 12:59:49 +00001886static const struct ethtool_ops sh_eth_ethtool_ops = {
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001887 .get_settings = sh_eth_get_settings,
1888 .set_settings = sh_eth_set_settings,
stephen hemminger9b07be42012-01-04 12:59:49 +00001889 .nway_reset = sh_eth_nway_reset,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001890 .get_msglevel = sh_eth_get_msglevel,
1891 .set_msglevel = sh_eth_set_msglevel,
stephen hemminger9b07be42012-01-04 12:59:49 +00001892 .get_link = ethtool_op_get_link,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001893 .get_strings = sh_eth_get_strings,
1894 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1895 .get_sset_count = sh_eth_get_sset_count,
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001896 .get_ringparam = sh_eth_get_ringparam,
1897 .set_ringparam = sh_eth_set_ringparam,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001898};
1899
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001900/* network device open function */
1901static int sh_eth_open(struct net_device *ndev)
1902{
1903 int ret = 0;
1904 struct sh_eth_private *mdp = netdev_priv(ndev);
1905
Magnus Dammbcd51492009-10-09 00:20:04 +00001906 pm_runtime_get_sync(&mdp->pdev->dev);
1907
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04001908 napi_enable(&mdp->napi);
1909
Joe Perchesa0607fd2009-11-18 23:29:17 -08001910 ret = request_irq(ndev->irq, sh_eth_interrupt,
Nobuhiro Iwamatsu5b3dfd12013-06-06 09:49:30 +00001911 mdp->cd->irq_flags, ndev->name, ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001912 if (ret) {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001913 dev_err(&ndev->dev, "Can not assign IRQ number\n");
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04001914 goto out_napi_off;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001915 }
1916
1917 /* Descriptor set */
1918 ret = sh_eth_ring_init(ndev);
1919 if (ret)
1920 goto out_free_irq;
1921
1922 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001923 ret = sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001924 if (ret)
1925 goto out_free_irq;
1926
1927	/* PHY control start */
1928 ret = sh_eth_phy_start(ndev);
1929 if (ret)
1930 goto out_free_irq;
1931
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001932 return ret;
1933
1934out_free_irq:
1935 free_irq(ndev->irq, ndev);
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04001936out_napi_off:
1937 napi_disable(&mdp->napi);
Magnus Dammbcd51492009-10-09 00:20:04 +00001938 pm_runtime_put_sync(&mdp->pdev->dev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001939 return ret;
1940}
1941
1942/* Timeout function */
1943static void sh_eth_tx_timeout(struct net_device *ndev)
1944{
1945 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001946 struct sh_eth_rxdesc *rxdesc;
1947 int i;
1948
1949 netif_stop_queue(ndev);
1950
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001951 if (netif_msg_timer(mdp)) {
1952 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
1953 ndev->name, (int)sh_eth_read(ndev, EESR));
1954 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001955
1956 /* tx_errors count up */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001957 ndev->stats.tx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001958
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001959 /* Free all the skbuffs in the Rx queue. */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001960 for (i = 0; i < mdp->num_rx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001961 rxdesc = &mdp->rx_ring[i];
1962 rxdesc->status = 0;
1963 rxdesc->addr = 0xBADF00D0;
1964 if (mdp->rx_skbuff[i])
1965 dev_kfree_skb(mdp->rx_skbuff[i]);
1966 mdp->rx_skbuff[i] = NULL;
1967 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001968 for (i = 0; i < mdp->num_tx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001969 if (mdp->tx_skbuff[i])
1970 dev_kfree_skb(mdp->tx_skbuff[i]);
1971 mdp->tx_skbuff[i] = NULL;
1972 }
1973
1974 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001975 sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001976}
1977
1978/* Packet transmit function */
1979static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1980{
1981 struct sh_eth_private *mdp = netdev_priv(ndev);
1982 struct sh_eth_txdesc *txdesc;
1983 u32 entry;
Nobuhiro Iwamatsufb5e2f92008-11-17 20:29:58 +00001984 unsigned long flags;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001985
1986 spin_lock_irqsave(&mdp->lock, flags);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001987 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001988 if (!sh_eth_txfree(ndev)) {
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001989 if (netif_msg_tx_queued(mdp))
1990 dev_warn(&ndev->dev, "TxFD exhausted.\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001991 netif_stop_queue(ndev);
1992 spin_unlock_irqrestore(&mdp->lock, flags);
Patrick McHardy5b548142009-06-12 06:22:29 +00001993 return NETDEV_TX_BUSY;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001994 }
1995 }
1996 spin_unlock_irqrestore(&mdp->lock, flags);
1997
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001998 entry = mdp->cur_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001999 mdp->tx_skbuff[entry] = skb;
2000 txdesc = &mdp->tx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002001 /* soft swap. */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002002 if (!mdp->cd->hw_swap)
2003 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2004 skb->len + 2);
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00002005 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2006 DMA_TO_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002007 if (skb->len < ETHERSMALL)
2008 txdesc->buffer_length = ETHERSMALL;
2009 else
2010 txdesc->buffer_length = skb->len;
2011
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002012 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04002013 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002014 else
Yoshinori Sato71557a32008-08-06 19:49:00 -04002015 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002016
2017 mdp->cur_tx++;
2018
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002019 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2020 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09002021
Patrick McHardy6ed10652009-06-23 06:03:08 +00002022 return NETDEV_TX_OK;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002023}
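/* Ring-wrap note: the descriptor at index num_tx_ring - 1 is flagged
 * with TD_TDLE (the end-of-list bit, the TX counterpart of RD_RDEL on
 * the RX side) so that the DMA engine wraps back to the list base
 * instead of running past the ring.
 */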
2024
2025/* device close function */
2026static int sh_eth_close(struct net_device *ndev)
2027{
2028 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002029
2030 netif_stop_queue(ndev);
2031
2032 /* Disable interrupts by clearing the interrupt mask. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002033 sh_eth_write(ndev, 0x0000, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002034
2035 /* Stop the chip's Tx and Rx processes. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002036 sh_eth_write(ndev, 0, EDTRR);
2037 sh_eth_write(ndev, 0, EDRRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002038
2039 /* PHY Disconnect */
2040 if (mdp->phydev) {
2041 phy_stop(mdp->phydev);
2042 phy_disconnect(mdp->phydev);
2043 }
2044
2045 free_irq(ndev->irq, ndev);
2046
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002047 napi_disable(&mdp->napi);
2048
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002049 /* Free all the skbuffs in the Rx queue. */
2050 sh_eth_ring_free(ndev);
2051
2052 /* free DMA buffer */
Yoshihiro Shimoda91c77552012-06-26 20:00:01 +00002053 sh_eth_free_dma_buffer(mdp);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002054
Magnus Dammbcd51492009-10-09 00:20:04 +00002055 pm_runtime_put_sync(&mdp->pdev->dev);
2056
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002057 return 0;
2058}
2059
2060static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2061{
2062 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002063
Magnus Dammbcd51492009-10-09 00:20:04 +00002064 pm_runtime_get_sync(&mdp->pdev->dev);
2065
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002066 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002067 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002068 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002069 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002070 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002071 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002072 if (sh_eth_is_gether(mdp)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002073 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002074 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002075 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002076 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2077 } else {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002078 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002079 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2080 }
Magnus Dammbcd51492009-10-09 00:20:04 +00002081 pm_runtime_put_sync(&mdp->pdev->dev);
2082
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002083 return &ndev->stats;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002084}
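/* Counter note: TROCR, CDCR, LCCR and friends are on-chip counters that
 * the driver folds into ndev->stats and then clears by writing 0 (the
 * "(write clear)" pattern above), so each hardware count is consumed
 * exactly once per ndo_get_stats call.
 */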
2085
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002086/* ioctl to device function */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002087static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002088{
2089 struct sh_eth_private *mdp = netdev_priv(ndev);
2090 struct phy_device *phydev = mdp->phydev;
2091
2092 if (!netif_running(ndev))
2093 return -EINVAL;
2094
2095 if (!phydev)
2096 return -ENODEV;
2097
Richard Cochran28b04112010-07-17 08:48:55 +00002098 return phy_mii_ioctl(phydev, rq, cmd);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002099}
2100
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002101/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
2102static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2103 int entry)
2104{
2105 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2106}
2107
2108static u32 sh_eth_tsu_get_post_mask(int entry)
2109{
2110 return 0x0f << (28 - ((entry % 8) * 4));
2111}
2112
2113static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2114{
2115 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2116}
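/* Worked example for the three helpers above (values follow directly
 * from the arithmetic): for CAM entry 5 on port 0, the POST register is
 * TSU_POST1 + (5 / 8) * 4 = TSU_POST1, the 4-bit field mask is
 * 0x0f << (28 - 20) = 0x00000f00, and the per-port enable bit is
 * (0x08 >> 0) << 8 = 0x00000800 (port 1 would use 0x00000200).
 */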
2117
2118static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2119 int entry)
2120{
2121 struct sh_eth_private *mdp = netdev_priv(ndev);
2122 u32 tmp;
2123 void *reg_offset;
2124
2125 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2126 tmp = ioread32(reg_offset);
2127 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2128}
2129
2130static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2131 int entry)
2132{
2133 struct sh_eth_private *mdp = netdev_priv(ndev);
2134 u32 post_mask, ref_mask, tmp;
2135 void *reg_offset;
2136
2137 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2138 post_mask = sh_eth_tsu_get_post_mask(entry);
2139 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2140
2141 tmp = ioread32(reg_offset);
2142 iowrite32(tmp & ~post_mask, reg_offset);
2143
2144	/* If the other port still has the entry enabled, return "true" */
2145 return tmp & ref_mask;
2146}
2147
2148static int sh_eth_tsu_busy(struct net_device *ndev)
2149{
2150 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2151 struct sh_eth_private *mdp = netdev_priv(ndev);
2152
2153 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2154 udelay(10);
2155 timeout--;
2156 if (timeout <= 0) {
2157 dev_err(&ndev->dev, "%s: timeout\n", __func__);
2158 return -ETIMEDOUT;
2159 }
2160 }
2161
2162 return 0;
2163}
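/* Timeout arithmetic: SH_ETH_TSU_TIMEOUT_MS * 100 iterations of
 * udelay(10) gives 100 * 10 us = 1 ms per unit, so the loop busy-waits
 * for at most SH_ETH_TSU_TIMEOUT_MS milliseconds before returning
 * -ETIMEDOUT.
 */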
2164
2165static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2166 const u8 *addr)
2167{
2168 u32 val;
2169
2170 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2171 iowrite32(val, reg);
2172 if (sh_eth_tsu_busy(ndev) < 0)
2173 return -EBUSY;
2174
2175 val = addr[4] << 8 | addr[5];
2176 iowrite32(val, reg + 4);
2177 if (sh_eth_tsu_busy(ndev) < 0)
2178 return -EBUSY;
2179
2180 return 0;
2181}
2182
2183static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2184{
2185 u32 val;
2186
2187 val = ioread32(reg);
2188 addr[0] = (val >> 24) & 0xff;
2189 addr[1] = (val >> 16) & 0xff;
2190 addr[2] = (val >> 8) & 0xff;
2191 addr[3] = val & 0xff;
2192 val = ioread32(reg + 4);
2193 addr[4] = (val >> 8) & 0xff;
2194 addr[5] = val & 0xff;
2195}
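/* Layout illustration (example address only): the MAC address
 * 00:11:22:33:44:55 is stored as 0x00112233 in the first CAM word and
 * 0x00004455 in the word at reg + 4, matching the packing done in
 * sh_eth_tsu_write_entry() above.
 */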
2196
2197
2198static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2199{
2200 struct sh_eth_private *mdp = netdev_priv(ndev);
2201 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2202 int i;
2203 u8 c_addr[ETH_ALEN];
2204
2205 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2206 sh_eth_tsu_read_entry(reg_offset, c_addr);
dingtianhongc4bde292013-12-30 15:41:17 +08002207 if (ether_addr_equal(addr, c_addr))
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002208 return i;
2209 }
2210
2211 return -ENOENT;
2212}
2213
2214static int sh_eth_tsu_find_empty(struct net_device *ndev)
2215{
2216 u8 blank[ETH_ALEN];
2217 int entry;
2218
2219 memset(blank, 0, sizeof(blank));
2220 entry = sh_eth_tsu_find_entry(ndev, blank);
2221 return (entry < 0) ? -ENOMEM : entry;
2222}
2223
2224static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2225 int entry)
2226{
2227 struct sh_eth_private *mdp = netdev_priv(ndev);
2228 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2229 int ret;
2230 u8 blank[ETH_ALEN];
2231
2232 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2233 ~(1 << (31 - entry)), TSU_TEN);
2234
2235 memset(blank, 0, sizeof(blank));
2236 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2237 if (ret < 0)
2238 return ret;
2239 return 0;
2240}
2241
2242static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2243{
2244 struct sh_eth_private *mdp = netdev_priv(ndev);
2245 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2246 int i, ret;
2247
2248 if (!mdp->cd->tsu)
2249 return 0;
2250
2251 i = sh_eth_tsu_find_entry(ndev, addr);
2252 if (i < 0) {
2253 /* No entry found, create one */
2254 i = sh_eth_tsu_find_empty(ndev);
2255 if (i < 0)
2256 return -ENOMEM;
2257 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2258 if (ret < 0)
2259 return ret;
2260
2261 /* Enable the entry */
2262 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2263 (1 << (31 - i)), TSU_TEN);
2264 }
2265
2266 /* Entry found or created, enable POST */
2267 sh_eth_tsu_enable_cam_entry_post(ndev, i);
2268
2269 return 0;
2270}
2271
2272static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2273{
2274 struct sh_eth_private *mdp = netdev_priv(ndev);
2275 int i, ret;
2276
2277 if (!mdp->cd->tsu)
2278 return 0;
2279
2280 i = sh_eth_tsu_find_entry(ndev, addr);
2281	if (i >= 0) {
2282 /* Entry found */
2283 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2284 goto done;
2285
2286		/* Disable the entry only if both ports have disabled it */
2287 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2288 if (ret < 0)
2289 return ret;
2290 }
2291done:
2292 return 0;
2293}
2294
2295static int sh_eth_tsu_purge_all(struct net_device *ndev)
2296{
2297 struct sh_eth_private *mdp = netdev_priv(ndev);
2298 int i, ret;
2299
2300 if (unlikely(!mdp->cd->tsu))
2301 return 0;
2302
2303 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2304 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2305 continue;
2306
2307		/* Disable the entry only if both ports have disabled it */
2308 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2309 if (ret < 0)
2310 return ret;
2311 }
2312
2313 return 0;
2314}
2315
2316static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2317{
2318 struct sh_eth_private *mdp = netdev_priv(ndev);
2319 u8 addr[ETH_ALEN];
2320 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2321 int i;
2322
2323 if (unlikely(!mdp->cd->tsu))
2324 return;
2325
2326 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2327 sh_eth_tsu_read_entry(reg_offset, addr);
2328 if (is_multicast_ether_addr(addr))
2329 sh_eth_tsu_del_entry(ndev, addr);
2330 }
2331}
2332
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002333/* Multicast reception directions set */
2334static void sh_eth_set_multicast_list(struct net_device *ndev)
2335{
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002336 struct sh_eth_private *mdp = netdev_priv(ndev);
2337 u32 ecmr_bits;
2338 int mcast_all = 0;
2339 unsigned long flags;
2340
2341 spin_lock_irqsave(&mdp->lock, flags);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002342 /* Initial condition is MCT = 1, PRM = 0.
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002343 * Depending on ndev->flags, set PRM or clear MCT
2344 */
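	/* Resulting ECMR receive modes (illustrative summary):
	 *	MCT=1, PRM=0: only CAM/POST-listed multicasts accepted
	 *	MCT=0, PRM=0: all multicasts accepted (IFF_ALLMULTI)
	 *	PRM=1:        everything accepted (IFF_PROMISC)
	 */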
2345 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2346
2347 if (!(ndev->flags & IFF_MULTICAST)) {
2348 sh_eth_tsu_purge_mcast(ndev);
2349 mcast_all = 1;
2350 }
2351 if (ndev->flags & IFF_ALLMULTI) {
2352 sh_eth_tsu_purge_mcast(ndev);
2353 ecmr_bits &= ~ECMR_MCT;
2354 mcast_all = 1;
2355 }
2356
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002357 if (ndev->flags & IFF_PROMISC) {
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002358 sh_eth_tsu_purge_all(ndev);
2359 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2360 } else if (mdp->cd->tsu) {
2361 struct netdev_hw_addr *ha;
2362 netdev_for_each_mc_addr(ha, ndev) {
2363 if (mcast_all && is_multicast_ether_addr(ha->addr))
2364 continue;
2365
2366 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2367 if (!mcast_all) {
2368 sh_eth_tsu_purge_mcast(ndev);
2369 ecmr_bits &= ~ECMR_MCT;
2370 mcast_all = 1;
2371 }
2372 }
2373 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002374 } else {
2375 /* Normal, unicast/broadcast-only mode. */
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002376 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002377 }
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002378
2379 /* update the ethernet mode */
2380 sh_eth_write(ndev, ecmr_bits, ECMR);
2381
2382 spin_unlock_irqrestore(&mdp->lock, flags);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002383}
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002384
2385static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2386{
2387 if (!mdp->port)
2388 return TSU_VTAG0;
2389 else
2390 return TSU_VTAG1;
2391}
2392
Patrick McHardy80d5c362013-04-19 02:04:28 +00002393static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2394 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002395{
2396 struct sh_eth_private *mdp = netdev_priv(ndev);
2397 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2398
2399 if (unlikely(!mdp->cd->tsu))
2400 return -EPERM;
2401
2402 /* No filtering if vid = 0 */
2403 if (!vid)
2404 return 0;
2405
2406 mdp->vlan_num_ids++;
2407
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002408 /* The controller has one VLAN tag HW filter. So, if the filter is
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002409	 * already enabled, the driver disables it and lets every VLAN ID pass.
2410 */
2411 if (mdp->vlan_num_ids > 1) {
2412 /* disable VLAN filter */
2413 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2414 return 0;
2415 }
2416
2417 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2418 vtag_reg_index);
2419
2420 return 0;
2421}
2422
Patrick McHardy80d5c362013-04-19 02:04:28 +00002423static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2424 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002425{
2426 struct sh_eth_private *mdp = netdev_priv(ndev);
2427 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2428
2429 if (unlikely(!mdp->cd->tsu))
2430 return -EPERM;
2431
2432 /* No filtering if vid = 0 */
2433 if (!vid)
2434 return 0;
2435
2436 mdp->vlan_num_ids--;
2437 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2438
2439 return 0;
2440}
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002441
2442/* SuperH's TSU register init function */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002443static void sh_eth_tsu_init(struct sh_eth_private *mdp)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002444{
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002445 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2446 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2447 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2448 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2449 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2450 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2451 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2452 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2453 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2454 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002455 if (sh_eth_is_gether(mdp)) {
2456 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2457 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2458 } else {
2459 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2460 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2461 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002462 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
2463 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
2464 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2465 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
2466 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
2467 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
2468 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002469}
2470
2471/* MDIO bus release function */
2472static int sh_mdio_release(struct net_device *ndev)
2473{
2474 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2475
2476 /* unregister mdio bus */
2477 mdiobus_unregister(bus);
2478
2479 /* remove mdio bus info from net_device */
2480 dev_set_drvdata(&ndev->dev, NULL);
2481
2482 /* free bitbang info */
2483 free_mdio_bitbang(bus);
2484
2485 return 0;
2486}
2487
2488/* MDIO bus init function */
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002489static int sh_mdio_init(struct net_device *ndev, int id,
2490 struct sh_eth_plat_data *pd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002491{
2492 int ret, i;
2493 struct bb_info *bitbang;
2494 struct sh_eth_private *mdp = netdev_priv(ndev);
2495
2496 /* create bit control struct for PHY */
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002497 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
2498 GFP_KERNEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002499 if (!bitbang) {
2500 ret = -ENOMEM;
2501 goto out;
2502 }
2503
2504 /* bitbang init */
Yoshihiro Shimodaae706442011-09-27 21:48:58 +00002505 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002506 bitbang->set_gate = pd->set_mdio_gate;
Sergei Shtylyovdfed5e72013-03-21 10:37:54 +00002507 bitbang->mdi_msk = PIR_MDI;
2508 bitbang->mdo_msk = PIR_MDO;
2509 bitbang->mmd_msk = PIR_MMD;
2510 bitbang->mdc_msk = PIR_MDC;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002511 bitbang->ctrl.ops = &bb_ops;
2512
Stefan Weilc2e07b32010-08-03 19:44:52 +02002513 /* MII controller setting */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002514 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2515 if (!mdp->mii_bus) {
2516 ret = -ENOMEM;
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002517 goto out;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002518 }
2519
2520 /* Hook up MII support for ethtool */
2521 mdp->mii_bus->name = "sh_mii";
Lennert Buytenhek18ee49d2008-10-01 15:41:33 +00002522 mdp->mii_bus->parent = &ndev->dev;
Florian Fainelli5278fb52012-01-09 23:59:17 +00002523 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002524 mdp->pdev->name, id);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002525
2526 /* PHY IRQ */
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002527 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
2528 sizeof(int) * PHY_MAX_ADDR,
2529 GFP_KERNEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002530 if (!mdp->mii_bus->irq) {
2531 ret = -ENOMEM;
2532 goto out_free_bus;
2533 }
2534
2535 for (i = 0; i < PHY_MAX_ADDR; i++)
2536 mdp->mii_bus->irq[i] = PHY_POLL;
Sergei Shtylyov18be0992013-12-20 01:39:52 +03002537 if (pd->phy_irq > 0)
2538 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002539
YOSHIFUJI Hideaki / 吉藤英明8f6352f2012-11-02 04:45:07 +00002540 /* register mdio bus */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002541 ret = mdiobus_register(mdp->mii_bus);
2542 if (ret)
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002543 goto out_free_bus;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002544
2545 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2546
2547 return 0;
2548
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002549out_free_bus:
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002550 free_mdio_bitbang(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002551
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002552out:
2553 return ret;
2554}
2555
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002556static const u16 *sh_eth_get_register_offset(int register_type)
2557{
2558 const u16 *reg_offset = NULL;
2559
2560 switch (register_type) {
2561 case SH_ETH_REG_GIGABIT:
2562 reg_offset = sh_eth_offset_gigabit;
2563 break;
Sergei Shtylyova3f109b2013-03-28 11:51:31 +00002564 case SH_ETH_REG_FAST_RCAR:
2565 reg_offset = sh_eth_offset_fast_rcar;
2566 break;
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002567 case SH_ETH_REG_FAST_SH4:
2568 reg_offset = sh_eth_offset_fast_sh4;
2569 break;
2570 case SH_ETH_REG_FAST_SH3_SH2:
2571 reg_offset = sh_eth_offset_fast_sh3_sh2;
2572 break;
2573 default:
Nobuhiro Iwamatsu14c33262013-03-20 22:46:55 +00002574 pr_err("Unknown register type (%d)\n", register_type);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002575 break;
2576 }
2577
2578 return reg_offset;
2579}
2580
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002581static const struct net_device_ops sh_eth_netdev_ops = {
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002582 .ndo_open = sh_eth_open,
2583 .ndo_stop = sh_eth_close,
2584 .ndo_start_xmit = sh_eth_start_xmit,
2585 .ndo_get_stats = sh_eth_get_stats,
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002586 .ndo_tx_timeout = sh_eth_tx_timeout,
2587 .ndo_do_ioctl = sh_eth_do_ioctl,
2588 .ndo_validate_addr = eth_validate_addr,
2589 .ndo_set_mac_address = eth_mac_addr,
2590 .ndo_change_mtu = eth_change_mtu,
2591};
2592
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002593static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2594 .ndo_open = sh_eth_open,
2595 .ndo_stop = sh_eth_close,
2596 .ndo_start_xmit = sh_eth_start_xmit,
2597 .ndo_get_stats = sh_eth_get_stats,
2598 .ndo_set_rx_mode = sh_eth_set_multicast_list,
2599 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
2600 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
2601 .ndo_tx_timeout = sh_eth_tx_timeout,
2602 .ndo_do_ioctl = sh_eth_do_ioctl,
2603 .ndo_validate_addr = eth_validate_addr,
2604 .ndo_set_mac_address = eth_mac_addr,
2605 .ndo_change_mtu = eth_change_mtu,
2606};
2607
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002608static int sh_eth_drv_probe(struct platform_device *pdev)
2609{
Kuninori Morimoto9c386572010-08-19 00:39:45 -07002610 int ret, devno = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002611 struct resource *res;
2612 struct net_device *ndev = NULL;
Kuninori Morimotoec0d7552011-06-23 16:02:38 +00002613 struct sh_eth_private *mdp = NULL;
Jingoo Han0b76b862013-08-30 14:00:11 +09002614 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
Sergei Shtylyovafe391a2013-06-07 13:54:02 +00002615 const struct platform_device_id *id = platform_get_device_id(pdev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002616
2617 /* get base addr */
2618 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2619 if (unlikely(res == NULL)) {
2620 dev_err(&pdev->dev, "invalid resource\n");
2621 ret = -EINVAL;
2622 goto out;
2623 }
2624
2625 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2626 if (!ndev) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002627 ret = -ENOMEM;
2628 goto out;
2629 }
2630
2631 /* The sh Ether-specific entries in the device structure. */
2632 ndev->base_addr = res->start;
2633 devno = pdev->id;
2634 if (devno < 0)
2635 devno = 0;
2636
2637 ndev->dma = -1;
roel kluincc3c0802008-09-10 19:22:44 +02002638 ret = platform_get_irq(pdev, 0);
2639 if (ret < 0) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002640 ret = -ENODEV;
2641 goto out_release;
2642 }
roel kluincc3c0802008-09-10 19:22:44 +02002643 ndev->irq = ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002644
2645 SET_NETDEV_DEV(ndev, &pdev->dev);
2646
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002647 mdp = netdev_priv(ndev);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002648 mdp->num_tx_ring = TX_RING_SIZE;
2649 mdp->num_rx_ring = RX_RING_SIZE;
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002650 mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2651 if (IS_ERR(mdp->addr)) {
2652 ret = PTR_ERR(mdp->addr);
Yoshihiro Shimodaae706442011-09-27 21:48:58 +00002653 goto out_release;
2654 }
2655
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002656 spin_lock_init(&mdp->lock);
Magnus Dammbcd51492009-10-09 00:20:04 +00002657 mdp->pdev = pdev;
2658 pm_runtime_enable(&pdev->dev);
2659 pm_runtime_resume(&pdev->dev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002660
Sergei Shtylyov3b4c5cb2013-10-30 23:30:19 +03002661 if (!pd) {
2662 dev_err(&pdev->dev, "no platform data\n");
2663 ret = -EINVAL;
2664 goto out_release;
2665 }
2666
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002667 /* get PHY ID */
Yoshinori Sato71557a32008-08-06 19:49:00 -04002668 mdp->phy_id = pd->phy;
Yoshihiro Shimodae47c9052011-03-07 21:59:45 +00002669 mdp->phy_interface = pd->phy_interface;
Yoshinori Sato71557a32008-08-06 19:49:00 -04002670 /* EDMAC endian */
2671 mdp->edmac_endian = pd->edmac_endian;
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00002672 mdp->no_ether_link = pd->no_ether_link;
2673 mdp->ether_link_active_low = pd->ether_link_active_low;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002674
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002675 /* set cpu data */
Sergei Shtylyov589ebde2013-06-07 14:05:59 +00002676 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
Sergei Shtylyova3153d82013-08-18 03:11:28 +04002677 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002678 sh_eth_set_default_cpu_data(mdp->cd);
2679
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002680 /* set function */
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002681 if (mdp->cd->tsu)
2682 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2683 else
2684 ndev->netdev_ops = &sh_eth_netdev_ops;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002685 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002686 ndev->watchdog_timeo = TX_TIMEOUT;
2687
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002688 /* debug message level */
2689 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002690
2691 /* read and set MAC address */
Magnus Damm748031f2009-10-09 00:17:14 +00002692 read_mac_address(ndev, pd->mac_addr);
Sergei Shtylyovff6e7222013-04-29 09:49:42 +00002693 if (!is_valid_ether_addr(ndev->dev_addr)) {
2694 dev_warn(&pdev->dev,
2695 "no valid MAC address supplied, using a random one.\n");
2696 eth_hw_addr_random(ndev);
2697 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002698
Yoshihiro Shimoda6ba88022012-02-15 17:55:01 +00002699 /* ioremap the TSU registers */
2700 if (mdp->cd->tsu) {
2701 struct resource *rtsu;
2702 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002703 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2704 if (IS_ERR(mdp->tsu_addr)) {
2705 ret = PTR_ERR(mdp->tsu_addr);
Sergei Shtylyovfc0c0902013-03-19 13:41:32 +00002706 goto out_release;
2707 }
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002708 mdp->port = devno % 2;
Patrick McHardyf6469682013-04-19 02:04:27 +00002709 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
Yoshihiro Shimoda6ba88022012-02-15 17:55:01 +00002710 }

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (needed only once per TSU) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* register the NAPI poll handler with the default weight of 64 */
	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_napi_del:
	netif_napi_del(&mdp->napi);

out_release:
	/* free the allocated net device */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
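
/* A rough sketch of the runtime PM pattern the no-op callbacks above rely
 * on: the open path takes a runtime PM reference and then reprograms the
 * block from scratch, and the stop path drops the reference again, so the
 * callbacks themselves have nothing to save or restore. (Sketch only; the
 * real ndo_open/ndo_stop implementations appear earlier in this file.)
 *
 *	pm_runtime_get_sync(&mdp->pdev->dev);	// on open: power the block up
 *	...full hardware re-initialization...
 *	pm_runtime_put_sync(&mdp->pdev->dev);	// on stop: allow power-down
 */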

static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
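
/* Binding is by platform device name: registering a device with one of the
 * names above matches this driver, and probe casts the associated
 * driver_data back to the per-SoC struct sh_eth_cpu_data. A hypothetical
 * registration (resources and pdata as defined by the board code):
 *
 *	pdev = platform_device_register_resndata(NULL, "r8a7790-ether", 0,
 *						 my_eth_resources,
 *						 ARRAY_SIZE(my_eth_resources),
 *						 &my_eth_pdata,
 *						 sizeof(my_eth_pdata));
 */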

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		.name = CARDNAME,
		.pm = SH_ETH_PM_OPS,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");