/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
        (NETIF_MSG_LINK | \
        NETIF_MSG_TIMER | \
        NETIF_MSG_RX_ERR | \
        NETIF_MSG_TX_ERR)

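/* Per-SoC register layout tables.  Each array below maps the common
 * register enumeration from sh_eth.h to the byte offset of that register
 * on one controller generation; sh_eth_read()/sh_eth_write() look
 * registers up through mdp->reg_offset, so a register that a given core
 * lacks is simply never listed for it.
 */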
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
        [EDRRR]         = 0x0410,
        [EESR]          = 0x0428,
        [EESIPR]        = 0x0430,
        [TDLAR]         = 0x0010,
        [TDFAR]         = 0x0014,
        [TDFXR]         = 0x0018,
        [TDFFR]         = 0x001c,
        [RDLAR]         = 0x0030,
        [RDFAR]         = 0x0034,
        [RDFXR]         = 0x0038,
        [RDFFR]         = 0x003c,
        [TRSCER]        = 0x0438,
        [RMFCR]         = 0x0440,
        [TFTR]          = 0x0448,
        [FDR]           = 0x0450,
        [RMCR]          = 0x0458,
        [RPADIR]        = 0x0460,
        [FCFTR]         = 0x0468,
        [CSMR]          = 0x04E4,

        [ECMR]          = 0x0500,
        [ECSR]          = 0x0510,
        [ECSIPR]        = 0x0518,
        [PIR]           = 0x0520,
        [PSR]           = 0x0528,
        [PIPR]          = 0x052c,
        [RFLR]          = 0x0508,
        [APR]           = 0x0554,
        [MPR]           = 0x0558,
        [PFTCR]         = 0x055c,
        [PFRCR]         = 0x0560,
        [TPAUSER]       = 0x0564,
        [GECMR]         = 0x05b0,
        [BCULR]         = 0x05b4,
        [MAHR]          = 0x05c0,
        [MALR]          = 0x05c8,
        [TROCR]         = 0x0700,
        [CDCR]          = 0x0708,
        [LCCR]          = 0x0710,
        [CEFCR]         = 0x0740,
        [FRECR]         = 0x0748,
        [TSFRCR]        = 0x0750,
        [TLFRCR]        = 0x0758,
        [RFCR]          = 0x0760,
        [CERCR]         = 0x0768,
        [CEECR]         = 0x0770,
        [MAFCR]         = 0x0778,
        [RMII_MII]      = 0x0790,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_FCM]       = 0x0018,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAG0]     = 0x0040,
        [TSU_QTAG1]     = 0x0044,
        [TSU_FWSR]      = 0x0050,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_VTAG0]     = 0x0058,
        [TSU_VTAG1]     = 0x005c,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRH31]    = 0x01f8,
        [TSU_ADRL31]    = 0x01fc,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008c,
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
        [TXALCR1]       = 0x00a0,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
        [FWALCR1]       = 0x00b4,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
        [ECMR]          = 0x0300,
        [RFLR]          = 0x0308,
        [ECSR]          = 0x0310,
        [ECSIPR]        = 0x0318,
        [PIR]           = 0x0320,
        [PSR]           = 0x0328,
        [RDMLR]         = 0x0340,
        [IPGR]          = 0x0350,
        [APR]           = 0x0354,
        [MPR]           = 0x0358,
        [RFCF]          = 0x0360,
        [TPAUSER]       = 0x0364,
        [TPAUSECR]      = 0x0368,
        [MAHR]          = 0x03c0,
        [MALR]          = 0x03c8,
        [TROCR]         = 0x03d0,
        [CDCR]          = 0x03d4,
        [LCCR]          = 0x03d8,
        [CNDCR]         = 0x03dc,
        [CEFCR]         = 0x03e4,
        [FRECR]         = 0x03e8,
        [TSFRCR]        = 0x03ec,
        [TLFRCR]        = 0x03f0,
        [RFCR]          = 0x03f4,
        [MAFCR]         = 0x03f8,

        [EDMR]          = 0x0200,
        [EDTRR]         = 0x0208,
        [EDRRR]         = 0x0210,
        [TDLAR]         = 0x0218,
        [RDLAR]         = 0x0220,
        [EESR]          = 0x0228,
        [EESIPR]        = 0x0230,
        [TRSCER]        = 0x0238,
        [RMFCR]         = 0x0240,
        [TFTR]          = 0x0248,
        [FDR]           = 0x0250,
        [RMCR]          = 0x0258,
        [TFUCR]         = 0x0264,
        [RFOCR]         = 0x0268,
        [RMIIMODE]      = 0x026c,
        [FCFTR]         = 0x0270,
        [TRIMD]         = 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
        [ECMR]          = 0x0100,
        [RFLR]          = 0x0108,
        [ECSR]          = 0x0110,
        [ECSIPR]        = 0x0118,
        [PIR]           = 0x0120,
        [PSR]           = 0x0128,
        [RDMLR]         = 0x0140,
        [IPGR]          = 0x0150,
        [APR]           = 0x0154,
        [MPR]           = 0x0158,
        [TPAUSER]       = 0x0164,
        [RFCF]          = 0x0160,
        [TPAUSECR]      = 0x0168,
        [BCFRR]         = 0x016c,
        [MAHR]          = 0x01c0,
        [MALR]          = 0x01c8,
        [TROCR]         = 0x01d0,
        [CDCR]          = 0x01d4,
        [LCCR]          = 0x01d8,
        [CNDCR]         = 0x01dc,
        [CEFCR]         = 0x01e4,
        [FRECR]         = 0x01e8,
        [TSFRCR]        = 0x01ec,
        [TLFRCR]        = 0x01f0,
        [RFCR]          = 0x01f4,
        [MAFCR]         = 0x01f8,
        [RTRATE]        = 0x01fc,

        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0008,
        [EDRRR]         = 0x0010,
        [TDLAR]         = 0x0018,
        [RDLAR]         = 0x0020,
        [EESR]          = 0x0028,
        [EESIPR]        = 0x0030,
        [TRSCER]        = 0x0038,
        [RMFCR]         = 0x0040,
        [TFTR]          = 0x0048,
        [FDR]           = 0x0050,
        [RMCR]          = 0x0058,
        [TFUCR]         = 0x0064,
        [RFOCR]         = 0x0068,
        [FCFTR]         = 0x0070,
        [RPADIR]        = 0x0078,
        [TRIMD]         = 0x007c,
        [RBWAR]         = 0x00c8,
        [RDFAR]         = 0x00cc,
        [TBRAR]         = 0x00d4,
        [TDFAR]         = 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [ECMR]          = 0x0160,
        [ECSR]          = 0x0164,
        [ECSIPR]        = 0x0168,
        [PIR]           = 0x016c,
        [MAHR]          = 0x0170,
        [MALR]          = 0x0174,
        [RFLR]          = 0x0178,
        [PSR]           = 0x017c,
        [TROCR]         = 0x0180,
        [CDCR]          = 0x0184,
        [LCCR]          = 0x0188,
        [CNDCR]         = 0x018c,
        [CEFCR]         = 0x0194,
        [FRECR]         = 0x0198,
        [TSFRCR]        = 0x019c,
        [TLFRCR]        = 0x01a0,
        [RFCR]          = 0x01a4,
        [MAFCR]         = 0x01a8,
        [IPGR]          = 0x01b4,
        [APR]           = 0x01b8,
        [MPR]           = 0x01bc,
        [TPAUSER]       = 0x01c4,
        [BCFR]          = 0x01cc,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_FCM]       = 0x0018,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAGM0]    = 0x0040,
        [TSU_QTAGM1]    = 0x0044,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_FWSR]      = 0x0050,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008c,
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
        [TXALCR1]       = 0x00a0,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
        [FWALCR1]       = 0x00b4,

        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRL31]    = 0x01fc,
};

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_gigabit;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
        u32 value = 0x0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->phy_interface) {
        case PHY_INTERFACE_MODE_GMII:
                value = 0x2;
                break;
        case PHY_INTERFACE_MODE_MII:
                value = 0x1;
                break;
        case PHY_INTERFACE_MODE_RMII:
                value = 0x0;
                break;
        default:
                pr_warn("PHY interface mode was not set up; defaulting to MII.\n");
                value = 0x1;
                break;
        }

        sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else             /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* CPU-dependent code starts here */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
                break;
        default:
                break;
        }
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};

/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
        .shift_rd0      = 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
                break;
        default:
                break;
        }
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7724,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0, RTRATE);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, 1, RTRATE);
                break;
        default:
                break;
        }
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7757,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .rmcr_value     = RMCR_RNC,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .no_ade         = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
};

#define SH_GIGA_ETH_BASE        0xfee00000UL
#define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
        int i;
        unsigned long mahr[2], malr[2];

        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
                malr[i] = ioread32((void *)GIGA_MALR(i));
                mahr[i] = ioread32((void *)GIGA_MAHR(i));
        }

        /* reset device */
        iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
        mdelay(1);

        /* restore MAHR and MALR */
        for (i = 0; i < 2; i++) {
                iowrite32(malr[i], (void *)GIGA_MALR(i));
                iowrite32(mahr[i], (void *)GIGA_MAHR(i));
        }
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0x00000000, GECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, 0x00000010, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, 0x00000020, GECMR);
                break;
        default:
                break;
        }
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
        .chip_reset     = sh_eth_chip_reset_giga,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_giga,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000072f,
        .rmcr_value     = RMCR_RNC,

        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, GECMR_10, GECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, GECMR_100, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
        default:
                break;
        }
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .hw_crc         = 1,
        .select_mii     = 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .irq_flags      = IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);

        sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
        .chip_reset     = sh_eth_chip_reset_r8a7740,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000070f,
        .rmcr_value     = RMCR_RNC,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .select_mii     = 1,
        .shift_rd0      = 1,
};

static struct sh_eth_cpu_data sh7619_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};

static struct sh_eth_cpu_data sh771x_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .tsu            = 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
        if (!cd->ecsr_value)
                cd->ecsr_value = DEFAULT_ECSR_INIT;

        if (!cd->ecsipr_value)
                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

        if (!cd->fcftr_value)
                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
                                  DEFAULT_FIFO_F_D_RFD;

        if (!cd->fdr_value)
                cd->fdr_value = DEFAULT_FDR_INIT;

        if (!cd->rmcr_value)
                cd->rmcr_value = DEFAULT_RMCR_VALUE;

        if (!cd->tx_check)
                cd->tx_check = DEFAULT_TX_CHECK;

        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}

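/* Poll for soft-reset completion.  The low two EDMR bits are the
 * software-reset request bits on GETHER-class cores; the hardware
 * clears them once the reset has completed, so the loop below polls
 * them for roughly 100 ms before giving up.
 */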
static int sh_eth_check_reset(struct net_device *ndev)
{
        int ret = 0;
        int cnt = 100;

        while (cnt > 0) {
                if (!(sh_eth_read(ndev, EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
        }
        if (cnt <= 0) {
                pr_err("Device reset failed\n");
                ret = -ETIMEDOUT;
        }
        return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret = 0;

        if (sh_eth_is_gether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
                             EDMR);

                ret = sh_eth_check_reset(ndev);
                if (ret)
                        goto out;

                /* Table Init */
                sh_eth_write(ndev, 0x0, TDLAR);
                sh_eth_write(ndev, 0x0, TDFAR);
                sh_eth_write(ndev, 0x0, TDFXR);
                sh_eth_write(ndev, 0x0, TDFFR);
                sh_eth_write(ndev, 0x0, RDLAR);
                sh_eth_write(ndev, 0x0, RDFAR);
                sh_eth_write(ndev, 0x0, RDFXR);
                sh_eth_write(ndev, 0x0, RDFFR);

                /* Reset HW CRC register */
                if (mdp->cd->hw_crc)
                        sh_eth_write(ndev, 0x0, CSMR);

                /* Select MII mode */
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
                             EDMR);
                mdelay(3);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
                             EDMR);
        }

out:
        return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        int reserve;

        reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
        if (reserve)
                skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return cpu_to_le32(x);
        case EDMAC_BIG_ENDIAN:
                return cpu_to_be32(x);
        }
        return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return le32_to_cpu(x);
        case EDMAC_BIG_ENDIAN:
                return be32_to_cpu(x);
        }
        return x;
}

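/* The EDMAC accesses the descriptor rings in whichever byte order the
 * board glue selected (mdp->edmac_endian, normally taken from the
 * platform data), so every descriptor status/length access must go
 * through the two helpers above instead of touching the fields
 * directly.
 */
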
843
Sergei Shtylyov128296f2014-01-03 15:52:22 +0300844/* Program the hardware MAC address from dev->dev_addr. */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -0700845static void update_mac_address(struct net_device *ndev)
846{
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +0000847 sh_eth_write(ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +0300848 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
849 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +0000850 sh_eth_write(ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +0300851 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -0700852}
853
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM holding its MAC address.
 * This driver picks up the MAC address programmed by the bootloader
 * (U-Boot or sh-ipl+g), so a MAC address must be set in the bootloader
 * before this device can be used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
                ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
                ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
                ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
                ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
        }
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
        if (sh_eth_is_gether(mdp))
                return EDTRR_TRNS_GETHER;
        else
                return EDTRR_TRNS_ETHER;
}

struct bb_info {
        void (*set_gate)(void *addr);
        struct mdiobb_ctrl ctrl;
        void *addr;
        u32 mmd_msk; /* MMD */
        u32 mdo_msk;
        u32 mdi_msk;
        u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
        iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
        iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
        return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
                bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = sh_mdc_ctrl,
        .set_mdio_dir = sh_mmd_ctrl,
        .set_mdio_data = sh_set_mdio,
        .get_mdio_data = sh_get_mdio,
};

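/* bb_ops is handed to the generic mdio-bitbang layer (typically via
 * alloc_mdio_bitbang() at probe time), which clocks out whole MDIO
 * frames by toggling the MDC/MDIO pins through the four callbacks
 * above.
 */
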
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++) {
                        if (mdp->rx_skbuff[i])
                                dev_kfree_skb(mdp->rx_skbuff[i]);
                }
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;

        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
                for (i = 0; i < mdp->num_tx_ring; i++) {
                        if (mdp->tx_skbuff[i])
                                dev_kfree_skb(mdp->tx_skbuff[i]);
                }
        }
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
        mdp->dirty_rx = 0;
        mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < mdp->num_rx_ring; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
                mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
                               DMA_FROM_DEVICE);
                sh_eth_set_receive_align(skb);

                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

                /* The buffer size is aligned to a 16-byte boundary. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
                        if (sh_eth_is_gether(mdp))
                                sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
                }
        }

        mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

        /* Mark the last entry as wrapping the ring. */
        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < mdp->num_tx_ring; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
                        sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
                        if (sh_eth_is_gether(mdp))
                                sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
                }
        }

        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

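/* Ring ownership convention: a set RD_RACT/TD_TACT bit hands a
 * descriptor to the hardware, and RD_RDEL/TD_TDLE on the last entry
 * makes the DMA engine wrap back to the base address programmed into
 * RDLAR/TDLAR above.
 */
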
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize, ret = 0;

        /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
        if (mdp->cd->rpadir)
                mdp->rx_buf_sz += NET_IP_ALIGN;

        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
                                       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
        if (!mdp->rx_skbuff) {
                ret = -ENOMEM;
                return ret;
        }

        mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
                                       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
        if (!mdp->tx_skbuff) {
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->rx_ring) {
                ret = -ENOMEM;
                goto desc_ring_free;
        }

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->tx_ring) {
                ret = -ENOMEM;
                goto desc_ring_free;
        }
        return ret;

desc_ring_free:
        /* free DMA buffer */
        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
        /* Free Rx and Tx skb ring buffer */
        sh_eth_ring_free(ndev);
        mdp->tx_ring = NULL;
        mdp->rx_ring = NULL;

        return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
        int ringsize;

        if (mdp->rx_ring) {
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
                dma_free_coherent(NULL, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }

        if (mdp->tx_ring) {
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(NULL, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
}

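/* Hardware bring-up order matters below: soft-reset the controller,
 * lay out the descriptor rings, program the DMAC/E-MAC configuration
 * registers, and only then unmask interrupts (EESIPR/ECSIPR) and start
 * reception.  With start == false the same path reconfigures the
 * hardware without enabling interrupts or waking the Tx queue.
 */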
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 val;

        /* Soft Reset */
        ret = sh_eth_reset(ndev);
        if (ret)
                goto out;

        if (mdp->cd->rmiimode)
                sh_eth_write(ndev, 0x1, RMIIMODE);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
                sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

        /* all sh_eth int mask */
        sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
        if (mdp->cd->hw_swap)
                sh_eth_write(ndev, EDMR_EL, EDMR);
        else
#endif
                sh_eth_write(ndev, 0, EDMR);

        /* FIFO size set */
        sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
        sh_eth_write(ndev, 0, TFTR);

        /* Frame recv control */
        sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

        sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */

        sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

        if (!mdp->cd->no_trimd)
                sh_eth_write(ndev, 0, TRIMD);

        /* Recv frame limit set register */
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);

        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
        if (start)
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

        sh_eth_write(ndev, val, ECMR);

        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);

        /* E-MAC Status Register clear */
        sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

        /* E-MAC Interrupt Enable register */
        if (start)
                sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

        /* mask reset */
        if (mdp->cd->apr)
                sh_eth_write(ndev, APR_AP, APR);
        if (mdp->cd->mpr)
                sh_eth_write(ndev, MPR_MP, MPR);
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

        if (start) {
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);

                netif_start_queue(ndev);
        }

out:
        return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int free_num = 0;
        int entry = 0;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % mdp->num_tx_ring;
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
                                         txdesc->buffer_length, DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        free_num++;
                }
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                if (entry >= mdp->num_tx_ring - 1)
                        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += txdesc->buffer_length;
        }
        return free_num;
}

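/* The receive handler below is written for NAPI-style polling: the
 * caller passes its budget through *quota, every delivered frame
 * decrements it, and a nonzero return tells the poll routine that the
 * budget was exhausted and it must be rescheduled rather than
 * re-enable interrupts.
 */
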
1276/* Packet receive function */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001277static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001278{
1279 struct sh_eth_private *mdp = netdev_priv(ndev);
1280 struct sh_eth_rxdesc *rxdesc;
1281
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001282 int entry = mdp->cur_rx % mdp->num_rx_ring;
1283 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001284 struct sk_buff *skb;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001285 int exceeded = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001286 u16 pkt_len = 0;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001287 u32 desc_status;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001288
1289 rxdesc = &mdp->rx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001290 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1291 desc_status = edmac_to_cpu(mdp, rxdesc->status);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001292 pkt_len = rxdesc->frame_length;
1293
1294 if (--boguscnt < 0)
1295 break;
1296
Sergei Shtylyov37191092013-06-19 23:30:23 +04001297 if (*quota <= 0) {
1298 exceeded = 1;
1299 break;
1300 }
1301 (*quota)--;
1302
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001303 if (!(desc_status & RDFEND))
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001304 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001305
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001306 /* In case of almost all GETHER/ETHERs, the Receive Frame State
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001307 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1308 * bit 0. However, in case of the R8A7740's GETHER, the RFS
1309 * bits are from bit 25 to bit 16. So, the driver needs right
1310 * shifting by 16.
1311 */
Sergei Shtylyovac8025a2013-06-13 22:12:45 +04001312 if (mdp->cd->shift_rd0)
1313 desc_status >>= 16;
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001314
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001315 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1316 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001317 ndev->stats.rx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001318 if (desc_status & RD_RFS1)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001319 ndev->stats.rx_crc_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001320 if (desc_status & RD_RFS2)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001321 ndev->stats.rx_frame_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001322 if (desc_status & RD_RFS3)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001323 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001324 if (desc_status & RD_RFS4)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001325 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001326 if (desc_status & RD_RFS6)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001327 ndev->stats.rx_missed_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001328 if (desc_status & RD_RFS10)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001329 ndev->stats.rx_over_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001330 } else {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001331 if (!mdp->cd->hw_swap)
1332 sh_eth_soft_swap(
1333 phys_to_virt(ALIGN(rxdesc->addr, 4)),
1334 pkt_len + 2);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001335 skb = mdp->rx_skbuff[entry];
1336 mdp->rx_skbuff[entry] = NULL;
Magnus Damm503914c2009-12-15 21:16:55 -08001337 if (mdp->cd->rpadir)
1338 skb_reserve(skb, NET_IP_ALIGN);
Kouei Abe7db8e0c2013-08-30 12:41:07 +09001339 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1340 mdp->rx_buf_sz,
1341 DMA_FROM_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001342 skb_put(skb, pkt_len);
1343 skb->protocol = eth_type_trans(skb, ndev);
Sergei Shtylyova8e9fd02013-09-03 03:03:10 +04001344 netif_receive_skb(skb);
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001345 ndev->stats.rx_packets++;
1346 ndev->stats.rx_bytes += pkt_len;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001347 }
Yoshinori Sato71557a32008-08-06 19:49:00 -04001348 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001349 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
Yoshihiro Shimoda862df492009-05-24 23:53:40 +00001350 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001351 }
1352
1353 /* Refill the Rx ring buffers. */
1354 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001355 entry = mdp->dirty_rx % mdp->num_rx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001356 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001357		/* The buffer size is aligned to a 16-byte boundary. */
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001358 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001359
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001360 if (mdp->rx_skbuff[entry] == NULL) {
Pradeep A. Dalvidae2e9f2012-02-06 11:16:13 +00001361 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001362 mdp->rx_skbuff[entry] = skb;
1363 if (skb == NULL)
1364 break; /* Better luck next round. */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001365 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001366 DMA_FROM_DEVICE);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001367 sh_eth_set_receive_align(skb);
1368
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001369 skb_checksum_none_assert(skb);
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001370 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001371 }
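		/* Tag the ring's last descriptor with RD_RDEL as well, which
		 * marks the end of the descriptor list so reception wraps
		 * back to the first descriptor.
		 */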
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001372 if (entry >= mdp->num_rx_ring - 1)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001373 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001374 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001375 else
1376 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001377 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001378 }
1379
1380 /* Restart Rx engine if stopped. */
1381 /* If we don't need to check status, don't. -KDU */
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001382 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
Yoshihiro Shimodaa18e08b2012-06-20 15:26:34 +00001383		/* fix up cur_rx and dirty_rx for the next reception if RDE was set */
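		/* Each Rx descriptor occupies 16 bytes, so the byte offset
		 * RDFAR - RDLAR shifted right by 4 gives the index of the
		 * descriptor the DMAC will process next.
		 */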
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001384 if (intr_status & EESR_RDE) {
1385 u32 count = (sh_eth_read(ndev, RDFAR) -
1386 sh_eth_read(ndev, RDLAR)) >> 4;
1387
1388 mdp->cur_rx = count;
1389 mdp->dirty_rx = count;
1390 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001391 sh_eth_write(ndev, EDRRR_R, EDRRR);
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001392 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001393
Sergei Shtylyov37191092013-06-19 23:30:23 +04001394 return exceeded;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001395}
1396
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001397static void sh_eth_rcv_snd_disable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001398{
1399 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001400 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1401 ~(ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001402}
1403
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001404static void sh_eth_rcv_snd_enable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001405{
1406 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001407 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1408 (ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001409}
1410
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001411/* error control function */
1412static void sh_eth_error(struct net_device *ndev, int intr_status)
1413{
1414 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001415 u32 felic_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001416 u32 link_stat;
1417 u32 mask;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001418
1419 if (intr_status & EESR_ECI) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001420 felic_stat = sh_eth_read(ndev, ECSR);
1421 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001422 if (felic_stat & ECSR_ICD)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001423 ndev->stats.tx_carrier_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001424 if (felic_stat & ECSR_LCHNG) {
1425 /* Link Changed */
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001426 if (mdp->cd->no_psr || mdp->no_ether_link) {
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001427 goto ignore_link;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001428 } else {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001429				link_stat = sh_eth_read(ndev, PSR);
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001430 if (mdp->ether_link_active_low)
1431 link_stat = ~link_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001432 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001433 if (!(link_stat & PHY_ST_LINK)) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001434 sh_eth_rcv_snd_disable(ndev);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001435 } else {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001436 /* Link Up */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001437 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001438 ~DMAC_M_ECI, EESIPR);
1439 /* clear int */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001440 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001441 ECSR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001442 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001443 DMAC_M_ECI, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001444 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001445 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001446 }
1447 }
1448 }
1449
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001450ignore_link:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001451 if (intr_status & EESR_TWB) {
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001452 /* Unused write back interrupt */
1453 if (intr_status & EESR_TABT) { /* Transmit Abort int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001454 ndev->stats.tx_aborted_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001455 if (netif_msg_tx_err(mdp))
1456 dev_err(&ndev->dev, "Transmit Abort\n");
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001457 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001458 }
1459
1460 if (intr_status & EESR_RABT) {
1461 /* Receive Abort int */
1462 if (intr_status & EESR_RFRMER) {
1463 /* Receive Frame Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001464 ndev->stats.rx_frame_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001465 if (netif_msg_rx_err(mdp))
1466 dev_err(&ndev->dev, "Receive Abort\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001467 }
1468 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001469
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001470 if (intr_status & EESR_TDE) {
1471 /* Transmit Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001472 ndev->stats.tx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001473 if (netif_msg_tx_err(mdp))
1474 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1475 }
1476
1477 if (intr_status & EESR_TFE) {
1478 /* FIFO under flow */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001479 ndev->stats.tx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001480 if (netif_msg_tx_err(mdp))
1481 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001482 }
1483
1484 if (intr_status & EESR_RDE) {
1485 /* Receive Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001486 ndev->stats.rx_over_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001487
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001488 if (netif_msg_rx_err(mdp))
1489 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001490 }
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001491
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001492 if (intr_status & EESR_RFE) {
1493 /* Receive FIFO Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001494 ndev->stats.rx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001495 if (netif_msg_rx_err(mdp))
1496 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1497 }
1498
1499 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1500 /* Address Error */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001501 ndev->stats.tx_fifo_errors++;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001502 if (netif_msg_tx_err(mdp))
1503 dev_err(&ndev->dev, "Address Error\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001504 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001505
1506 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1507 if (mdp->cd->no_ade)
1508 mask &= ~EESR_ADE;
1509 if (intr_status & mask) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001510 /* Tx error */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001511 u32 edtrr = sh_eth_read(ndev, EDTRR);
Sergei Shtylyov090d5602014-01-11 02:41:49 +03001512
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001513 /* dmesg */
Sergei Shtylyov090d5602014-01-11 02:41:49 +03001514 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1515 intr_status, mdp->cur_tx, mdp->dirty_tx,
1516 (u32)ndev->state, edtrr);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001517 /* dirty buffer free */
1518 sh_eth_txfree(ndev);
1519
1520 /* SH7712 BUG */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001521 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001522 /* tx dma start */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001523 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001524 }
1525 /* wakeup */
1526 netif_wake_queue(ndev);
1527 }
1528}
1529
1530static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1531{
1532 struct net_device *ndev = netdev;
1533 struct sh_eth_private *mdp = netdev_priv(ndev);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001534 struct sh_eth_cpu_data *cd = mdp->cd;
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001535 irqreturn_t ret = IRQ_NONE;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001536 unsigned long intr_status, intr_enable;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001537
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001538 spin_lock(&mdp->lock);
1539
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001540 /* Get interrupt status */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001541 intr_status = sh_eth_read(ndev, EESR);
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001542 /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1543	 * enabled since it's the one that comes through regardless of the mask,
1544 * and we need to fully handle it in sh_eth_error() in order to quench
1545 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1546 */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001547 intr_enable = sh_eth_read(ndev, EESIPR);
1548 intr_status &= intr_enable | DMAC_M_ECI;
1549 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001550 ret = IRQ_HANDLED;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001551 else
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001552 goto other_irq;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001553
Sergei Shtylyov37191092013-06-19 23:30:23 +04001554 if (intr_status & EESR_RX_CHECK) {
1555 if (napi_schedule_prep(&mdp->napi)) {
1556 /* Mask Rx interrupts */
1557 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1558 EESIPR);
1559 __napi_schedule(&mdp->napi);
1560 } else {
1561 dev_warn(&ndev->dev,
1562 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1563 intr_status, intr_enable);
1564 }
1565 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001566
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001567 /* Tx Check */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001568 if (intr_status & cd->tx_check) {
Sergei Shtylyov37191092013-06-19 23:30:23 +04001569 /* Clear Tx interrupts */
1570 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1571
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001572 sh_eth_txfree(ndev);
1573 netif_wake_queue(ndev);
1574 }
1575
Sergei Shtylyov37191092013-06-19 23:30:23 +04001576 if (intr_status & cd->eesr_err_check) {
1577 /* Clear error interrupts */
1578 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1579
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001580 sh_eth_error(ndev, intr_status);
Sergei Shtylyov37191092013-06-19 23:30:23 +04001581 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001582
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001583other_irq:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001584 spin_unlock(&mdp->lock);
1585
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001586 return ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001587}
1588
Sergei Shtylyov37191092013-06-19 23:30:23 +04001589static int sh_eth_poll(struct napi_struct *napi, int budget)
1590{
1591 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1592 napi);
1593 struct net_device *ndev = napi->dev;
1594 int quota = budget;
1595 unsigned long intr_status;
1596
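	/* Keep reaping Rx work until the controller stops asserting Rx
	 * interrupt sources or sh_eth_rx() reports the quota exhausted.
	 */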
1597 for (;;) {
1598 intr_status = sh_eth_read(ndev, EESR);
1599 if (!(intr_status & EESR_RX_CHECK))
1600 break;
1601 /* Clear Rx interrupts */
1602 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1603
1604 if (sh_eth_rx(ndev, intr_status, &quota))
1605 goto out;
1606 }
1607
1608 napi_complete(napi);
1609
1610 /* Reenable Rx interrupts */
1611 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1612out:
1613 return budget - quota;
1614}
1615
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001616/* PHY state control function */
1617static void sh_eth_adjust_link(struct net_device *ndev)
1618{
1619 struct sh_eth_private *mdp = netdev_priv(ndev);
1620 struct phy_device *phydev = mdp->phydev;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001621 int new_state = 0;
1622
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001623 if (phydev->link) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001624 if (phydev->duplex != mdp->duplex) {
1625 new_state = 1;
1626 mdp->duplex = phydev->duplex;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001627 if (mdp->cd->set_duplex)
1628 mdp->cd->set_duplex(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001629 }
1630
1631 if (phydev->speed != mdp->speed) {
1632 new_state = 1;
1633 mdp->speed = phydev->speed;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001634 if (mdp->cd->set_rate)
1635 mdp->cd->set_rate(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001636 }
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001637 if (!mdp->link) {
Yoshihiro Shimoda91a56152011-07-05 20:33:51 +00001638 sh_eth_write(ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001639 sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1640 ECMR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001641 new_state = 1;
1642 mdp->link = phydev->link;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001643 if (mdp->cd->no_psr || mdp->no_ether_link)
1644 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001645 }
1646 } else if (mdp->link) {
1647 new_state = 1;
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001648 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001649 mdp->speed = 0;
1650 mdp->duplex = -1;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001651 if (mdp->cd->no_psr || mdp->no_ether_link)
1652 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001653 }
1654
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001655 if (new_state && netif_msg_link(mdp))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001656 phy_print_status(phydev);
1657}
1658
1659/* PHY init function */
1660static int sh_eth_phy_init(struct net_device *ndev)
1661{
1662 struct sh_eth_private *mdp = netdev_priv(ndev);
David S. Miller0a372eb2009-05-26 21:11:09 -07001663 char phy_id[MII_BUS_ID_SIZE + 3];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001664 struct phy_device *phydev = NULL;
1665
Kay Sieversfb28ad32008-11-10 13:55:14 -08001666 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001667 mdp->mii_bus->id, mdp->phy_id);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001668
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001669 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001670 mdp->speed = 0;
1671 mdp->duplex = -1;
1672
1673	/* Try to connect to the PHY */
Joe Perchesc061b182010-08-23 18:20:03 +00001674 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
Florian Fainellif9a8f832013-01-14 00:52:52 +00001675 mdp->phy_interface);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001676 if (IS_ERR(phydev)) {
1677 dev_err(&ndev->dev, "phy_connect failed\n");
1678 return PTR_ERR(phydev);
1679 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001680
Sergei Shtylyov18be0992013-12-20 01:39:52 +03001681 dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
1682 phydev->addr, phydev->irq, phydev->drv->name);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001683
1684 mdp->phydev = phydev;
1685
1686 return 0;
1687}
1688
1689/* PHY control start function */
1690static int sh_eth_phy_start(struct net_device *ndev)
1691{
1692 struct sh_eth_private *mdp = netdev_priv(ndev);
1693 int ret;
1694
1695 ret = sh_eth_phy_init(ndev);
1696 if (ret)
1697 return ret;
1698
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001699 phy_start(mdp->phydev);
1700
1701 return 0;
1702}
1703
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001704static int sh_eth_get_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001705 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001706{
1707 struct sh_eth_private *mdp = netdev_priv(ndev);
1708 unsigned long flags;
1709 int ret;
1710
1711 spin_lock_irqsave(&mdp->lock, flags);
1712 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1713 spin_unlock_irqrestore(&mdp->lock, flags);
1714
1715 return ret;
1716}
1717
1718static int sh_eth_set_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001719 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001720{
1721 struct sh_eth_private *mdp = netdev_priv(ndev);
1722 unsigned long flags;
1723 int ret;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001724
1725 spin_lock_irqsave(&mdp->lock, flags);
1726
1727 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001728 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001729
1730 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1731 if (ret)
1732 goto error_exit;
1733
1734 if (ecmd->duplex == DUPLEX_FULL)
1735 mdp->duplex = 1;
1736 else
1737 mdp->duplex = 0;
1738
1739 if (mdp->cd->set_duplex)
1740 mdp->cd->set_duplex(ndev);
1741
1742error_exit:
1743 mdelay(1);
1744
1745 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001746 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001747
1748 spin_unlock_irqrestore(&mdp->lock, flags);
1749
1750 return ret;
1751}
1752
1753static int sh_eth_nway_reset(struct net_device *ndev)
1754{
1755 struct sh_eth_private *mdp = netdev_priv(ndev);
1756 unsigned long flags;
1757 int ret;
1758
1759 spin_lock_irqsave(&mdp->lock, flags);
1760 ret = phy_start_aneg(mdp->phydev);
1761 spin_unlock_irqrestore(&mdp->lock, flags);
1762
1763 return ret;
1764}
1765
1766static u32 sh_eth_get_msglevel(struct net_device *ndev)
1767{
1768 struct sh_eth_private *mdp = netdev_priv(ndev);
1769 return mdp->msg_enable;
1770}
1771
1772static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1773{
1774 struct sh_eth_private *mdp = netdev_priv(ndev);
1775 mdp->msg_enable = value;
1776}
1777
1778static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1779 "rx_current", "tx_current",
1780 "rx_dirty", "tx_dirty",
1781};
1782#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1783
1784static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1785{
1786 switch (sset) {
1787 case ETH_SS_STATS:
1788 return SH_ETH_STATS_LEN;
1789 default:
1790 return -EOPNOTSUPP;
1791 }
1792}
1793
1794static void sh_eth_get_ethtool_stats(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001795 struct ethtool_stats *stats, u64 *data)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001796{
1797 struct sh_eth_private *mdp = netdev_priv(ndev);
1798 int i = 0;
1799
1800 /* device-specific stats */
1801 data[i++] = mdp->cur_rx;
1802 data[i++] = mdp->cur_tx;
1803 data[i++] = mdp->dirty_rx;
1804 data[i++] = mdp->dirty_tx;
1805}
1806
1807static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1808{
1809 switch (stringset) {
1810 case ETH_SS_STATS:
1811 memcpy(data, *sh_eth_gstrings_stats,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001812 sizeof(sh_eth_gstrings_stats));
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001813 break;
1814 }
1815}
1816
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001817static void sh_eth_get_ringparam(struct net_device *ndev,
1818 struct ethtool_ringparam *ring)
1819{
1820 struct sh_eth_private *mdp = netdev_priv(ndev);
1821
1822 ring->rx_max_pending = RX_RING_MAX;
1823 ring->tx_max_pending = TX_RING_MAX;
1824 ring->rx_pending = mdp->num_rx_ring;
1825 ring->tx_pending = mdp->num_tx_ring;
1826}
1827
1828static int sh_eth_set_ringparam(struct net_device *ndev,
1829 struct ethtool_ringparam *ring)
1830{
1831 struct sh_eth_private *mdp = netdev_priv(ndev);
1832 int ret;
1833
1834 if (ring->tx_pending > TX_RING_MAX ||
1835 ring->rx_pending > RX_RING_MAX ||
1836 ring->tx_pending < TX_RING_MIN ||
1837 ring->rx_pending < RX_RING_MIN)
1838 return -EINVAL;
1839 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1840 return -EINVAL;
1841
1842 if (netif_running(ndev)) {
1843 netif_tx_disable(ndev);
1844 /* Disable interrupts by clearing the interrupt mask. */
1845 sh_eth_write(ndev, 0x0000, EESIPR);
1846 /* Stop the chip's Tx and Rx processes. */
1847 sh_eth_write(ndev, 0, EDTRR);
1848 sh_eth_write(ndev, 0, EDRRR);
1849 synchronize_irq(ndev->irq);
1850 }
1851
1852 /* Free all the skbuffs in the Rx queue. */
1853 sh_eth_ring_free(ndev);
1854 /* Free DMA buffer */
1855 sh_eth_free_dma_buffer(mdp);
1856
1857 /* Set new parameters */
1858 mdp->num_rx_ring = ring->rx_pending;
1859 mdp->num_tx_ring = ring->tx_pending;
1860
1861 ret = sh_eth_ring_init(ndev);
1862 if (ret < 0) {
1863 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1864 return ret;
1865 }
1866 ret = sh_eth_dev_init(ndev, false);
1867 if (ret < 0) {
1868 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1869 return ret;
1870 }
1871
1872 if (netif_running(ndev)) {
1873 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1874 /* Setting the Rx mode will start the Rx process. */
1875 sh_eth_write(ndev, EDRRR_R, EDRRR);
1876 netif_wake_queue(ndev);
1877 }
1878
1879 return 0;
1880}
1881
stephen hemminger9b07be42012-01-04 12:59:49 +00001882static const struct ethtool_ops sh_eth_ethtool_ops = {
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001883 .get_settings = sh_eth_get_settings,
1884 .set_settings = sh_eth_set_settings,
stephen hemminger9b07be42012-01-04 12:59:49 +00001885 .nway_reset = sh_eth_nway_reset,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001886 .get_msglevel = sh_eth_get_msglevel,
1887 .set_msglevel = sh_eth_set_msglevel,
stephen hemminger9b07be42012-01-04 12:59:49 +00001888 .get_link = ethtool_op_get_link,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001889 .get_strings = sh_eth_get_strings,
1890 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1891 .get_sset_count = sh_eth_get_sset_count,
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001892 .get_ringparam = sh_eth_get_ringparam,
1893 .set_ringparam = sh_eth_set_ringparam,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001894};
1895
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001896/* network device open function */
1897static int sh_eth_open(struct net_device *ndev)
1898{
1899 int ret = 0;
1900 struct sh_eth_private *mdp = netdev_priv(ndev);
1901
Magnus Dammbcd51492009-10-09 00:20:04 +00001902 pm_runtime_get_sync(&mdp->pdev->dev);
1903
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04001904 napi_enable(&mdp->napi);
1905
Joe Perchesa0607fd2009-11-18 23:29:17 -08001906 ret = request_irq(ndev->irq, sh_eth_interrupt,
Nobuhiro Iwamatsu5b3dfd12013-06-06 09:49:30 +00001907 mdp->cd->irq_flags, ndev->name, ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001908 if (ret) {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001909 dev_err(&ndev->dev, "Can not assign IRQ number\n");
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04001910 goto out_napi_off;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001911 }
1912
1913 /* Descriptor set */
1914 ret = sh_eth_ring_init(ndev);
1915 if (ret)
1916 goto out_free_irq;
1917
1918 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001919 ret = sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001920 if (ret)
1921 goto out_free_irq;
1922
1923	/* PHY control start */
1924 ret = sh_eth_phy_start(ndev);
1925 if (ret)
1926 goto out_free_irq;
1927
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001928 return ret;
1929
1930out_free_irq:
1931 free_irq(ndev->irq, ndev);
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04001932out_napi_off:
1933 napi_disable(&mdp->napi);
Magnus Dammbcd51492009-10-09 00:20:04 +00001934 pm_runtime_put_sync(&mdp->pdev->dev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001935 return ret;
1936}
1937
1938/* Timeout function */
1939static void sh_eth_tx_timeout(struct net_device *ndev)
1940{
1941 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001942 struct sh_eth_rxdesc *rxdesc;
1943 int i;
1944
1945 netif_stop_queue(ndev);
1946
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001947 if (netif_msg_timer(mdp)) {
1948 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
1949 ndev->name, (int)sh_eth_read(ndev, EESR));
1950 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001951
1952	/* Count up tx_errors */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001953 ndev->stats.tx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001954
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001955 /* Free all the skbuffs in the Rx queue. */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001956 for (i = 0; i < mdp->num_rx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001957 rxdesc = &mdp->rx_ring[i];
1958 rxdesc->status = 0;
1959 rxdesc->addr = 0xBADF00D0;
1960 if (mdp->rx_skbuff[i])
1961 dev_kfree_skb(mdp->rx_skbuff[i]);
1962 mdp->rx_skbuff[i] = NULL;
1963 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001964 for (i = 0; i < mdp->num_tx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001965 if (mdp->tx_skbuff[i])
1966 dev_kfree_skb(mdp->tx_skbuff[i]);
1967 mdp->tx_skbuff[i] = NULL;
1968 }
1969
1970 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001971 sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001972}
1973
1974/* Packet transmit function */
1975static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1976{
1977 struct sh_eth_private *mdp = netdev_priv(ndev);
1978 struct sh_eth_txdesc *txdesc;
1979 u32 entry;
Nobuhiro Iwamatsufb5e2f92008-11-17 20:29:58 +00001980 unsigned long flags;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001981
1982 spin_lock_irqsave(&mdp->lock, flags);
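	/* Keep four descriptors of slack; stop the queue before the ring
	 * can fill up completely.
	 */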
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001983 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001984 if (!sh_eth_txfree(ndev)) {
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001985 if (netif_msg_tx_queued(mdp))
1986 dev_warn(&ndev->dev, "TxFD exhausted.\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001987 netif_stop_queue(ndev);
1988 spin_unlock_irqrestore(&mdp->lock, flags);
Patrick McHardy5b548142009-06-12 06:22:29 +00001989 return NETDEV_TX_BUSY;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001990 }
1991 }
1992 spin_unlock_irqrestore(&mdp->lock, flags);
1993
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001994 entry = mdp->cur_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001995 mdp->tx_skbuff[entry] = skb;
1996 txdesc = &mdp->tx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001997 /* soft swap. */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001998 if (!mdp->cd->hw_swap)
1999 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2000 skb->len + 2);
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00002001 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2002 DMA_TO_DEVICE);
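	/* Transmit at least ETHERSMALL bytes so that short frames meet the
	 * minimum Ethernet frame size.
	 */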
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002003 if (skb->len < ETHERSMALL)
2004 txdesc->buffer_length = ETHERSMALL;
2005 else
2006 txdesc->buffer_length = skb->len;
2007
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002008 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04002009 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002010 else
Yoshinori Sato71557a32008-08-06 19:49:00 -04002011 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002012
2013 mdp->cur_tx++;
2014
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002015 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2016 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09002017
Patrick McHardy6ed10652009-06-23 06:03:08 +00002018 return NETDEV_TX_OK;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002019}
2020
2021/* device close function */
2022static int sh_eth_close(struct net_device *ndev)
2023{
2024 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002025
2026 netif_stop_queue(ndev);
2027
2028 /* Disable interrupts by clearing the interrupt mask. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002029 sh_eth_write(ndev, 0x0000, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002030
2031 /* Stop the chip's Tx and Rx processes. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002032 sh_eth_write(ndev, 0, EDTRR);
2033 sh_eth_write(ndev, 0, EDRRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002034
2035 /* PHY Disconnect */
2036 if (mdp->phydev) {
2037 phy_stop(mdp->phydev);
2038 phy_disconnect(mdp->phydev);
2039 }
2040
2041 free_irq(ndev->irq, ndev);
2042
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002043 napi_disable(&mdp->napi);
2044
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002045 /* Free all the skbuffs in the Rx queue. */
2046 sh_eth_ring_free(ndev);
2047
2048 /* free DMA buffer */
Yoshihiro Shimoda91c77552012-06-26 20:00:01 +00002049 sh_eth_free_dma_buffer(mdp);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002050
Magnus Dammbcd51492009-10-09 00:20:04 +00002051 pm_runtime_put_sync(&mdp->pdev->dev);
2052
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002053 return 0;
2054}
2055
2056static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2057{
2058 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002059
Magnus Dammbcd51492009-10-09 00:20:04 +00002060 pm_runtime_get_sync(&mdp->pdev->dev);
2061
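	/* The statistics counters below clear on write, so fold each one
	 * into ndev->stats and then zero it.
	 */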
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002062 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002063 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002064 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002065 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002066 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002067 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002068 if (sh_eth_is_gether(mdp)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002069 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002070 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002071 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002072 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2073 } else {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002074 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002075 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2076 }
Magnus Dammbcd51492009-10-09 00:20:04 +00002077 pm_runtime_put_sync(&mdp->pdev->dev);
2078
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002079 return &ndev->stats;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002080}
2081
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002082/* Device ioctl function */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002083static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002084{
2085 struct sh_eth_private *mdp = netdev_priv(ndev);
2086 struct phy_device *phydev = mdp->phydev;
2087
2088 if (!netif_running(ndev))
2089 return -EINVAL;
2090
2091 if (!phydev)
2092 return -ENODEV;
2093
Richard Cochran28b04112010-07-17 08:48:55 +00002094 return phy_mii_ioctl(phydev, rq, cmd);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002095}
2096
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002097/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
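/* Judging by the mask arithmetic below, each TSU_POSTn register packs
 * eight CAM entries at four bits apiece, with one bit per port in each
 * nibble selecting whether that port accepts frames matching the entry.
 */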
2098static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2099 int entry)
2100{
2101 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2102}
2103
2104static u32 sh_eth_tsu_get_post_mask(int entry)
2105{
2106 return 0x0f << (28 - ((entry % 8) * 4));
2107}
2108
2109static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2110{
2111 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2112}
2113
2114static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2115 int entry)
2116{
2117 struct sh_eth_private *mdp = netdev_priv(ndev);
2118 u32 tmp;
2119 void *reg_offset;
2120
2121 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2122 tmp = ioread32(reg_offset);
2123 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2124}
2125
2126static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2127 int entry)
2128{
2129 struct sh_eth_private *mdp = netdev_priv(ndev);
2130 u32 post_mask, ref_mask, tmp;
2131 void *reg_offset;
2132
2133 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2134 post_mask = sh_eth_tsu_get_post_mask(entry);
2135 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2136
2137 tmp = ioread32(reg_offset);
2138 iowrite32(tmp & ~post_mask, reg_offset);
2139
2140	/* Return true if the other port still has this entry enabled */
2141 return tmp & ref_mask;
2142}
2143
2144static int sh_eth_tsu_busy(struct net_device *ndev)
2145{
2146 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2147 struct sh_eth_private *mdp = netdev_priv(ndev);
2148
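	/* Poll every 10 us; SH_ETH_TSU_TIMEOUT_MS * 100 polls of 10 us each
	 * add up to the timeout expressed in milliseconds.
	 */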
2149 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2150 udelay(10);
2151 timeout--;
2152 if (timeout <= 0) {
2153 dev_err(&ndev->dev, "%s: timeout\n", __func__);
2154 return -ETIMEDOUT;
2155 }
2156 }
2157
2158 return 0;
2159}
2160
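/* A CAM entry spans two 32-bit words: the first four octets of the MAC
 * address go in the high word (TSU_ADRH0 + entry * 8), the last two in
 * the low word at offset +4. Every write must wait for TSU_ADSBSY to
 * clear before the next access.
 */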
2161static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2162 const u8 *addr)
2163{
2164 u32 val;
2165
2166 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2167 iowrite32(val, reg);
2168 if (sh_eth_tsu_busy(ndev) < 0)
2169 return -EBUSY;
2170
2171 val = addr[4] << 8 | addr[5];
2172 iowrite32(val, reg + 4);
2173 if (sh_eth_tsu_busy(ndev) < 0)
2174 return -EBUSY;
2175
2176 return 0;
2177}
2178
2179static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2180{
2181 u32 val;
2182
2183 val = ioread32(reg);
2184 addr[0] = (val >> 24) & 0xff;
2185 addr[1] = (val >> 16) & 0xff;
2186 addr[2] = (val >> 8) & 0xff;
2187 addr[3] = val & 0xff;
2188 val = ioread32(reg + 4);
2189 addr[4] = (val >> 8) & 0xff;
2190 addr[5] = val & 0xff;
2191}
2192
2194static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2195{
2196 struct sh_eth_private *mdp = netdev_priv(ndev);
2197 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2198 int i;
2199 u8 c_addr[ETH_ALEN];
2200
2201 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2202 sh_eth_tsu_read_entry(reg_offset, c_addr);
dingtianhongc4bde292013-12-30 15:41:17 +08002203 if (ether_addr_equal(addr, c_addr))
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002204 return i;
2205 }
2206
2207 return -ENOENT;
2208}
2209
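/* A CAM slot whose stored address reads back as all zeroes is free */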
2210static int sh_eth_tsu_find_empty(struct net_device *ndev)
2211{
2212 u8 blank[ETH_ALEN];
2213 int entry;
2214
2215 memset(blank, 0, sizeof(blank));
2216 entry = sh_eth_tsu_find_entry(ndev, blank);
2217 return (entry < 0) ? -ENOMEM : entry;
2218}
2219
2220static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2221 int entry)
2222{
2223 struct sh_eth_private *mdp = netdev_priv(ndev);
2224 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2225 int ret;
2226 u8 blank[ETH_ALEN];
2227
2228 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2229 ~(1 << (31 - entry)), TSU_TEN);
2230
2231 memset(blank, 0, sizeof(blank));
2232 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2233 if (ret < 0)
2234 return ret;
2235 return 0;
2236}
2237
2238static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2239{
2240 struct sh_eth_private *mdp = netdev_priv(ndev);
2241 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2242 int i, ret;
2243
2244 if (!mdp->cd->tsu)
2245 return 0;
2246
2247 i = sh_eth_tsu_find_entry(ndev, addr);
2248 if (i < 0) {
2249 /* No entry found, create one */
2250 i = sh_eth_tsu_find_empty(ndev);
2251 if (i < 0)
2252 return -ENOMEM;
2253 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2254 if (ret < 0)
2255 return ret;
2256
2257 /* Enable the entry */
2258 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2259 (1 << (31 - i)), TSU_TEN);
2260 }
2261
2262 /* Entry found or created, enable POST */
2263 sh_eth_tsu_enable_cam_entry_post(ndev, i);
2264
2265 return 0;
2266}
2267
2268static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2269{
2270 struct sh_eth_private *mdp = netdev_priv(ndev);
2271 int i, ret;
2272
2273 if (!mdp->cd->tsu)
2274 return 0;
2275
2276 i = sh_eth_tsu_find_entry(ndev, addr);
2277 if (i) {
2278 /* Entry found */
2279 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2280 goto done;
2281
2282		/* Disable the entry if both ports have disabled it */
2283 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2284 if (ret < 0)
2285 return ret;
2286 }
2287done:
2288 return 0;
2289}
2290
2291static int sh_eth_tsu_purge_all(struct net_device *ndev)
2292{
2293 struct sh_eth_private *mdp = netdev_priv(ndev);
2294 int i, ret;
2295
2296 if (unlikely(!mdp->cd->tsu))
2297 return 0;
2298
2299 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2300 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2301 continue;
2302
2303		/* Disable the entry if both ports have disabled it */
2304 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2305 if (ret < 0)
2306 return ret;
2307 }
2308
2309 return 0;
2310}
2311
2312static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2313{
2314 struct sh_eth_private *mdp = netdev_priv(ndev);
2315 u8 addr[ETH_ALEN];
2316 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2317 int i;
2318
2319 if (unlikely(!mdp->cd->tsu))
2320 return;
2321
2322 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2323 sh_eth_tsu_read_entry(reg_offset, addr);
2324 if (is_multicast_ether_addr(addr))
2325 sh_eth_tsu_del_entry(ndev, addr);
2326 }
2327}
2328
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002329/* Set the multicast reception mode */
2330static void sh_eth_set_multicast_list(struct net_device *ndev)
2331{
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002332 struct sh_eth_private *mdp = netdev_priv(ndev);
2333 u32 ecmr_bits;
2334 int mcast_all = 0;
2335 unsigned long flags;
2336
2337 spin_lock_irqsave(&mdp->lock, flags);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002338 /* Initial condition is MCT = 1, PRM = 0.
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002339 * Depending on ndev->flags, set PRM or clear MCT
2340 */
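	/* With MCT set, multicast acceptance is left to the TSU CAM entries;
	 * the IFF_ALLMULTI and IFF_PROMISC branches below clear MCT or set
	 * PRM to open the filter up instead.
	 */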
2341 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2342
2343 if (!(ndev->flags & IFF_MULTICAST)) {
2344 sh_eth_tsu_purge_mcast(ndev);
2345 mcast_all = 1;
2346 }
2347 if (ndev->flags & IFF_ALLMULTI) {
2348 sh_eth_tsu_purge_mcast(ndev);
2349 ecmr_bits &= ~ECMR_MCT;
2350 mcast_all = 1;
2351 }
2352
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002353 if (ndev->flags & IFF_PROMISC) {
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002354 sh_eth_tsu_purge_all(ndev);
2355 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2356 } else if (mdp->cd->tsu) {
2357 struct netdev_hw_addr *ha;
2358 netdev_for_each_mc_addr(ha, ndev) {
2359 if (mcast_all && is_multicast_ether_addr(ha->addr))
2360 continue;
2361
2362 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2363 if (!mcast_all) {
2364 sh_eth_tsu_purge_mcast(ndev);
2365 ecmr_bits &= ~ECMR_MCT;
2366 mcast_all = 1;
2367 }
2368 }
2369 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002370 } else {
2371 /* Normal, unicast/broadcast-only mode. */
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002372 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002373 }
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002374
2375 /* update the ethernet mode */
2376 sh_eth_write(ndev, ecmr_bits, ECMR);
2377
2378 spin_unlock_irqrestore(&mdp->lock, flags);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002379}
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002380
2381static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2382{
2383 if (!mdp->port)
2384 return TSU_VTAG0;
2385 else
2386 return TSU_VTAG1;
2387}
2388
Patrick McHardy80d5c362013-04-19 02:04:28 +00002389static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2390 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002391{
2392 struct sh_eth_private *mdp = netdev_priv(ndev);
2393 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2394
2395 if (unlikely(!mdp->cd->tsu))
2396 return -EPERM;
2397
2398 /* No filtering if vid = 0 */
2399 if (!vid)
2400 return 0;
2401
2402 mdp->vlan_num_ids++;
2403
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002404 /* The controller has one VLAN tag HW filter. So, if the filter is
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002405	 * already enabled, the driver disables it and lets every VLAN ID through.
2406 */
2407 if (mdp->vlan_num_ids > 1) {
2408 /* disable VLAN filter */
2409 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2410 return 0;
2411 }
2412
2413 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2414 vtag_reg_index);
2415
2416 return 0;
2417}
2418
Patrick McHardy80d5c362013-04-19 02:04:28 +00002419static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2420 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002421{
2422 struct sh_eth_private *mdp = netdev_priv(ndev);
2423 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2424
2425 if (unlikely(!mdp->cd->tsu))
2426 return -EPERM;
2427
2428 /* No filtering if vid = 0 */
2429 if (!vid)
2430 return 0;
2431
2432 mdp->vlan_num_ids--;
2433 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2434
2435 return 0;
2436}
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002437
2438/* SuperH's TSU register init function */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002439static void sh_eth_tsu_init(struct sh_eth_private *mdp)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002440{
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002441 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2442 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2443 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2444 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2445 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2446 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2447 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2448 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2449 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2450 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002451 if (sh_eth_is_gether(mdp)) {
2452 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2453 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2454 } else {
2455 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2456 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2457 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002458	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* clear all interrupt status */
2459	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupts */
2460 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2461 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
2462 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
2463 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
2464 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002465}
2466
2467/* MDIO bus release function */
2468static int sh_mdio_release(struct net_device *ndev)
2469{
2470 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2471
2472 /* unregister mdio bus */
2473 mdiobus_unregister(bus);
2474
2475 /* remove mdio bus info from net_device */
2476 dev_set_drvdata(&ndev->dev, NULL);
2477
2478 /* free bitbang info */
2479 free_mdio_bitbang(bus);
2480
2481 return 0;
2482}
2483
2484/* MDIO bus init function */
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002485static int sh_mdio_init(struct net_device *ndev, int id,
2486 struct sh_eth_plat_data *pd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002487{
2488 int ret, i;
2489 struct bb_info *bitbang;
2490 struct sh_eth_private *mdp = netdev_priv(ndev);
2491
2492 /* create bit control struct for PHY */
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002493 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
2494 GFP_KERNEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002495 if (!bitbang) {
2496 ret = -ENOMEM;
2497 goto out;
2498 }
2499
2500 /* bitbang init */
Yoshihiro Shimodaae706442011-09-27 21:48:58 +00002501 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002502 bitbang->set_gate = pd->set_mdio_gate;
Sergei Shtylyovdfed5e72013-03-21 10:37:54 +00002503 bitbang->mdi_msk = PIR_MDI;
2504 bitbang->mdo_msk = PIR_MDO;
2505 bitbang->mmd_msk = PIR_MMD;
2506 bitbang->mdc_msk = PIR_MDC;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002507 bitbang->ctrl.ops = &bb_ops;
2508
Stefan Weilc2e07b32010-08-03 19:44:52 +02002509 /* MII controller setting */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002510 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2511 if (!mdp->mii_bus) {
2512 ret = -ENOMEM;
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002513 goto out;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002514 }
2515
2516 /* Hook up MII support for ethtool */
2517 mdp->mii_bus->name = "sh_mii";
Lennert Buytenhek18ee49d2008-10-01 15:41:33 +00002518 mdp->mii_bus->parent = &ndev->dev;
Florian Fainelli5278fb52012-01-09 23:59:17 +00002519 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002520 mdp->pdev->name, id);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002521
2522 /* PHY IRQ */
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002523 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
2524 sizeof(int) * PHY_MAX_ADDR,
2525 GFP_KERNEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002526 if (!mdp->mii_bus->irq) {
2527 ret = -ENOMEM;
2528 goto out_free_bus;
2529 }
2530
2531 for (i = 0; i < PHY_MAX_ADDR; i++)
2532 mdp->mii_bus->irq[i] = PHY_POLL;
Sergei Shtylyov18be0992013-12-20 01:39:52 +03002533 if (pd->phy_irq > 0)
2534 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002535
YOSHIFUJI Hideaki / 吉藤英明8f6352f2012-11-02 04:45:07 +00002536 /* register mdio bus */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002537 ret = mdiobus_register(mdp->mii_bus);
2538 if (ret)
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002539 goto out_free_bus;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002540
2541 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2542
2543 return 0;
2544
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002545out_free_bus:
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002546 free_mdio_bitbang(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002547
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002548out:
2549 return ret;
2550}
2551
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002552static const u16 *sh_eth_get_register_offset(int register_type)
2553{
2554 const u16 *reg_offset = NULL;
2555
2556 switch (register_type) {
2557 case SH_ETH_REG_GIGABIT:
2558 reg_offset = sh_eth_offset_gigabit;
2559 break;
Sergei Shtylyova3f109b2013-03-28 11:51:31 +00002560 case SH_ETH_REG_FAST_RCAR:
2561 reg_offset = sh_eth_offset_fast_rcar;
2562 break;
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002563 case SH_ETH_REG_FAST_SH4:
2564 reg_offset = sh_eth_offset_fast_sh4;
2565 break;
2566 case SH_ETH_REG_FAST_SH3_SH2:
2567 reg_offset = sh_eth_offset_fast_sh3_sh2;
2568 break;
2569 default:
Nobuhiro Iwamatsu14c33262013-03-20 22:46:55 +00002570 pr_err("Unknown register type (%d)\n", register_type);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002571 break;
2572 }
2573
2574 return reg_offset;
2575}
2576
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002577static const struct net_device_ops sh_eth_netdev_ops = {
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002578 .ndo_open = sh_eth_open,
2579 .ndo_stop = sh_eth_close,
2580 .ndo_start_xmit = sh_eth_start_xmit,
2581 .ndo_get_stats = sh_eth_get_stats,
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002582 .ndo_tx_timeout = sh_eth_tx_timeout,
2583 .ndo_do_ioctl = sh_eth_do_ioctl,
2584 .ndo_validate_addr = eth_validate_addr,
2585 .ndo_set_mac_address = eth_mac_addr,
2586 .ndo_change_mtu = eth_change_mtu,
2587};
2588
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002589static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2590 .ndo_open = sh_eth_open,
2591 .ndo_stop = sh_eth_close,
2592 .ndo_start_xmit = sh_eth_start_xmit,
2593 .ndo_get_stats = sh_eth_get_stats,
2594 .ndo_set_rx_mode = sh_eth_set_multicast_list,
2595 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
2596 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
2597 .ndo_tx_timeout = sh_eth_tx_timeout,
2598 .ndo_do_ioctl = sh_eth_do_ioctl,
2599 .ndo_validate_addr = eth_validate_addr,
2600 .ndo_set_mac_address = eth_mac_addr,
2601 .ndo_change_mtu = eth_change_mtu,
2602};
2603
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002604static int sh_eth_drv_probe(struct platform_device *pdev)
2605{
Kuninori Morimoto9c386572010-08-19 00:39:45 -07002606 int ret, devno = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002607 struct resource *res;
2608 struct net_device *ndev = NULL;
Kuninori Morimotoec0d7552011-06-23 16:02:38 +00002609 struct sh_eth_private *mdp = NULL;
Jingoo Han0b76b862013-08-30 14:00:11 +09002610 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
Sergei Shtylyovafe391a2013-06-07 13:54:02 +00002611 const struct platform_device_id *id = platform_get_device_id(pdev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002612
2613 /* get base addr */
2614 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2615 if (unlikely(res == NULL)) {
2616 dev_err(&pdev->dev, "invalid resource\n");
2617 ret = -EINVAL;
2618 goto out;
2619 }
2620
2621 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2622 if (!ndev) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002623 ret = -ENOMEM;
2624 goto out;
2625 }
2626
2627 /* The sh Ether-specific entries in the device structure. */
2628 ndev->base_addr = res->start;
2629 devno = pdev->id;
2630 if (devno < 0)
2631 devno = 0;
2632
2633 ndev->dma = -1;
roel kluincc3c0802008-09-10 19:22:44 +02002634 ret = platform_get_irq(pdev, 0);
2635 if (ret < 0) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002636 ret = -ENODEV;
2637 goto out_release;
2638 }
roel kluincc3c0802008-09-10 19:22:44 +02002639 ndev->irq = ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002640
2641 SET_NETDEV_DEV(ndev, &pdev->dev);
2642
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002643 mdp = netdev_priv(ndev);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002644 mdp->num_tx_ring = TX_RING_SIZE;
2645 mdp->num_rx_ring = RX_RING_SIZE;
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002646 mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2647 if (IS_ERR(mdp->addr)) {
2648 ret = PTR_ERR(mdp->addr);
Yoshihiro Shimodaae706442011-09-27 21:48:58 +00002649 goto out_release;
2650 }
2651
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002652 spin_lock_init(&mdp->lock);
Magnus Dammbcd51492009-10-09 00:20:04 +00002653 mdp->pdev = pdev;
2654 pm_runtime_enable(&pdev->dev);
2655 pm_runtime_resume(&pdev->dev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002656
Sergei Shtylyov3b4c5cb2013-10-30 23:30:19 +03002657 if (!pd) {
2658 dev_err(&pdev->dev, "no platform data\n");
2659 ret = -EINVAL;
2660 goto out_release;
2661 }
2662
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002663 /* get PHY ID */
Yoshinori Sato71557a32008-08-06 19:49:00 -04002664 mdp->phy_id = pd->phy;
Yoshihiro Shimodae47c9052011-03-07 21:59:45 +00002665 mdp->phy_interface = pd->phy_interface;
Yoshinori Sato71557a32008-08-06 19:49:00 -04002666 /* EDMAC endian */
2667 mdp->edmac_endian = pd->edmac_endian;
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00002668 mdp->no_ether_link = pd->no_ether_link;
2669 mdp->ether_link_active_low = pd->ether_link_active_low;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002670
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002671 /* set cpu data */
Sergei Shtylyov589ebde2013-06-07 14:05:59 +00002672 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
Sergei Shtylyova3153d82013-08-18 03:11:28 +04002673 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002674 sh_eth_set_default_cpu_data(mdp->cd);
2675
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002676 /* set function */
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002677 if (mdp->cd->tsu)
2678 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2679 else
2680 ndev->netdev_ops = &sh_eth_netdev_ops;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002681 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002682 ndev->watchdog_timeo = TX_TIMEOUT;
2683
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002684 /* debug message level */
2685 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002686
2687 /* read and set MAC address */
Magnus Damm748031f2009-10-09 00:17:14 +00002688 read_mac_address(ndev, pd->mac_addr);
Sergei Shtylyovff6e7222013-04-29 09:49:42 +00002689 if (!is_valid_ether_addr(ndev->dev_addr)) {
2690 dev_warn(&pdev->dev,
2691 "no valid MAC address supplied, using a random one.\n");
2692 eth_hw_addr_random(ndev);
2693 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002694
Yoshihiro Shimoda6ba88022012-02-15 17:55:01 +00002695 /* ioremap the TSU registers */
2696 if (mdp->cd->tsu) {
2697 struct resource *rtsu;
2698 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002699 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2700 if (IS_ERR(mdp->tsu_addr)) {
2701 ret = PTR_ERR(mdp->tsu_addr);
Sergei Shtylyovfc0c0902013-03-19 13:41:32 +00002702 goto out_release;
2703 }
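		/* Even-numbered instances use TSU port 0, odd ones port 1 */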
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002704 mdp->port = devno % 2;
Patrick McHardyf6469682013-04-19 02:04:27 +00002705 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
Yoshihiro Shimoda6ba88022012-02-15 17:55:01 +00002706 }
2707
Yoshihiro Shimoda150647f2012-02-15 17:54:56 +00002708	/* initialize the first device, or any device that needs init */
2709 if (!devno || pd->needs_init) {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002710 if (mdp->cd->chip_reset)
2711 mdp->cd->chip_reset(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002712
Yoshihiro Shimoda4986b992011-03-07 21:59:34 +00002713 if (mdp->cd->tsu) {
2714			/* TSU init (Init only) */
2715 sh_eth_tsu_init(mdp);
2716 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002717 }
2718
Sergei Shtylyov37191092013-06-19 23:30:23 +04002719 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* register the network device */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* MDIO bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

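	/* Error unwind: undo the successful steps above in reverse order */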
out_unregister:
	unregister_netdev(ndev);

out_napi_del:
	netif_napi_del(&mdp->napi);

out_release:
	/* free the net_device */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway, so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}
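
/* For context, a minimal sketch (simplified, not the actual function
 * bodies) of the call pairing these nop callbacks rely on: the open
 * path elsewhere in this driver takes a runtime PM reference and then
 * fully re-programs the hardware:
 *
 *	static int sh_eth_open(struct net_device *ndev)
 *	{
 *		struct sh_eth_private *mdp = netdev_priv(ndev);
 *
 *		pm_runtime_get_sync(&mdp->pdev->dev);
 *		... request IRQ, set up descriptor rings, write registers ...
 *	}
 *
 * The close path drops the reference with pm_runtime_put_sync(), after
 * which the core is free to suspend the device again.
 */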

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
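
/* For illustration only -- hypothetical board code, not part of this
 * driver: a platform device whose name matches an entry above binds to
 * this driver, and the entry's driver_data supplies the per-SoC
 * sh_eth_cpu_data (resources and most fields omitted):
 *
 *	static struct sh_eth_plat_data sh_eth_plat = {
 *		.phy = 1,
 *		.edmac_endian = EDMAC_LITTLE_ENDIAN,
 *		.phy_interface = PHY_INTERFACE_MODE_MII,
 *	};
 *
 *	static struct platform_device eth_device = {
 *		.name = "sh7757-ether",
 *		.id = 0,
 *		.dev = { .platform_data = &sh_eth_plat },
 *	};
 */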

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		.name = CARDNAME,
		.pm = SH_ETH_PM_OPS,
	},
};

module_platform_driver(sh_eth_driver);
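
/* module_platform_driver() expands to the standard module_init()/
 * module_exit() pair, which simply call platform_driver_register() and
 * platform_driver_unregister() on sh_eth_driver.
 */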

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");