/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2014 Renesas Solutions Corp.
 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
 * Copyright (C) 2014 Codethink Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

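/* Per-SoC register offset tables: the controllers handled by this driver
 * expose the same logical registers at different MMIO offsets, so register
 * accesses are translated through one of the arrays below, selected via
 * each SoC's register_type.
 */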
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[RFLR]		= 0x0508,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[MAFCR]		= 0x0778,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008C,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}

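/* Program the RMII_MII register according to the PHY interface mode
 * (GMII/MII/RMII) recorded in the private data.
 */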
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

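/* Mirror the negotiated duplex setting into the ECMR.DM bit */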
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rmiimode	= 1,
	.shift_rd0	= 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.hw_crc		= 1,
	.select_mii	= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.shift_rd0	= 1,
};

/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= 0xff7f009f,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.hw_crc		= 1,
	.tsu		= 1,
	.shift_rd0	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};

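/* Fill in conservative defaults for any per-SoC register values that the
 * cpu_data definitions above leave at zero.
 */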
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}

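/* Poll for up to 100 ms until the software reset bits in EDMR self-clear */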
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		netdev_err(ndev, "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

	return ret;
}

static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}


/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was programmed by the
 * bootloader (U-Boot or sh-ipl+g); the bootloader must therefore set
 * a MAC address before this device can be used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

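/* State for the bit-banged MDIO bus: the register to poke plus the bit
 * masks for the MDC clock, MDIO output/input data and direction control.
 */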
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++)
			dev_kfree_skb(mdp->tx_skbuff[i]);
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
			       DMA_FROM_DEVICE);
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

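/* Free the coherent DMA areas holding the Rx and Tx descriptor rings */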
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

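/* Bring the hardware to a known state: soft reset, then reprogram the
 * descriptor rings and the E-DMAC/E-MAC registers; when 'start' is true
 * the Rx process and the Tx queue are also (re)started.
 */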
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001260static int sh_eth_dev_init(struct net_device *ndev, bool start)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001261{
1262 int ret = 0;
1263 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001264 u32 val;
1265
1266 /* Soft Reset */
Nobuhiro Iwamatsu5cee1d32012-06-25 17:35:12 +00001267 ret = sh_eth_reset(ndev);
1268 if (ret)
Laurent Pinchartf738a132014-03-20 15:00:35 +01001269 return ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001270
Simon Horman55754f12013-07-23 10:18:04 +09001271 if (mdp->cd->rmiimode)
1272 sh_eth_write(ndev, 0x1, RMIIMODE);
1273
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001274 /* Descriptor format */
1275 sh_eth_ring_format(ndev);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001276 if (mdp->cd->rpadir)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001277 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001278
1279 /* all sh_eth int mask */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001280 sh_eth_write(ndev, 0, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001281
Yoshihiro Shimoda10b91942012-03-29 19:32:08 +00001282#if defined(__LITTLE_ENDIAN)
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001283 if (mdp->cd->hw_swap)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001284 sh_eth_write(ndev, EDMR_EL, EDMR);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001285 else
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001286#endif
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001287 sh_eth_write(ndev, 0, EDMR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001288
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001289 /* FIFO size set */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001290 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1291 sh_eth_write(ndev, 0, TFTR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001292
Ben Dooks530aa2d2014-06-03 12:21:13 +01001293 /* Frame recv control (enable multiple-packets per rx irq) */
1294 sh_eth_write(ndev, RMCR_RNC, RMCR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001295
Yoshihiro Shimoda2ecbb782012-06-26 19:59:58 +00001296 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001297
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001298 if (mdp->cd->bculr)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001299 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001300
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001301 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001302
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001303 if (!mdp->cd->no_trimd)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001304 sh_eth_write(ndev, 0, TRIMD);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001305
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001306 /* Recv frame limit set register */
Yoshihiro Shimodafdb37a72012-02-06 23:55:15 +00001307 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1308 RFLR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001309
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001310 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001311 if (start)
1312 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001313
1314 /* PAUSE Prohibition */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001315 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001316 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1317
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001318 sh_eth_write(ndev, val, ECMR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001319
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001320 if (mdp->cd->set_rate)
1321 mdp->cd->set_rate(ndev);
1322
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001323 /* E-MAC Status Register clear */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001324 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001325
1326 /* E-MAC Interrupt Enable register */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001327 if (start)
1328 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001329
1330 /* Set MAC address */
1331 update_mac_address(ndev);
1332
1333 /* mask reset */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001334 if (mdp->cd->apr)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001335 sh_eth_write(ndev, APR_AP, APR);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001336 if (mdp->cd->mpr)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001337 sh_eth_write(ndev, MPR_MP, MPR);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001338 if (mdp->cd->tpauser)
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001339 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001340
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001341 if (start) {
1342 /* Setting the Rx mode will start the Rx process. */
1343 sh_eth_write(ndev, EDRRR_R, EDRRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001344
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001345 netif_start_queue(ndev);
1346 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001347
1348 return ret;
1349}
1350
1351/* free Tx skb function */
1352static int sh_eth_txfree(struct net_device *ndev)
1353{
1354 struct sh_eth_private *mdp = netdev_priv(ndev);
1355 struct sh_eth_txdesc *txdesc;
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001356 int free_num = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001357 int entry = 0;
1358
1359 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001360 entry = mdp->dirty_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001361 txdesc = &mdp->tx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001362 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001363 break;
1364 /* Free the original skb. */
1365 if (mdp->tx_skbuff[entry]) {
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00001366 dma_unmap_single(&ndev->dev, txdesc->addr,
1367 txdesc->buffer_length, DMA_TO_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001368 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1369 mdp->tx_skbuff[entry] = NULL;
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001370 free_num++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001371 }
Yoshinori Sato71557a32008-08-06 19:49:00 -04001372 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001373 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04001374 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001375
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001376 ndev->stats.tx_packets++;
1377 ndev->stats.tx_bytes += txdesc->buffer_length;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001378 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001379 return free_num;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001380}
1381
1382/* Packet receive function */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001383static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001384{
1385 struct sh_eth_private *mdp = netdev_priv(ndev);
1386 struct sh_eth_rxdesc *rxdesc;
1387
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001388 int entry = mdp->cur_rx % mdp->num_rx_ring;
1389 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001390 struct sk_buff *skb;
1391 u16 pkt_len = 0;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001392 u32 desc_status;
Mitsuhiro Kimura4d6a9492014-11-27 20:34:00 +09001393 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001394
1395 rxdesc = &mdp->rx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001396 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1397 desc_status = edmac_to_cpu(mdp, rxdesc->status);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001398 pkt_len = rxdesc->frame_length;
1399
1400 if (--boguscnt < 0)
1401 break;
1402
Yoshihiro Shimoda4f809ce2014-06-10 09:40:14 +09001403 if (*quota <= 0)
Sergei Shtylyov37191092013-06-19 23:30:23 +04001404 break;
Yoshihiro Shimoda4f809ce2014-06-10 09:40:14 +09001405
Sergei Shtylyov37191092013-06-19 23:30:23 +04001406 (*quota)--;
1407
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001408 if (!(desc_status & RDFEND))
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001409 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001410
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001411 /* On almost all GETHER/ETHER controllers, the Receive Frame State
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001412 * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
Simon Hormandb893472014-01-17 09:22:28 +09001413 * However, on the R8A7740, R8A779x, and R7S72100 the RFS
 1414 * bits occupy bits 25 to 16, so the driver needs to shift
 1415 * them right by 16.
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001416 */
Sergei Shtylyovac8025a2013-06-13 22:12:45 +04001417 if (mdp->cd->shift_rd0)
1418 desc_status >>= 16;
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001419
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001420 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1421 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001422 ndev->stats.rx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001423 if (desc_status & RD_RFS1)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001424 ndev->stats.rx_crc_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001425 if (desc_status & RD_RFS2)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001426 ndev->stats.rx_frame_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001427 if (desc_status & RD_RFS3)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001428 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001429 if (desc_status & RD_RFS4)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001430 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001431 if (desc_status & RD_RFS6)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001432 ndev->stats.rx_missed_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001433 if (desc_status & RD_RFS10)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001434 ndev->stats.rx_over_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001435 } else {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001436 if (!mdp->cd->hw_swap)
1437 sh_eth_soft_swap(
1438 phys_to_virt(ALIGN(rxdesc->addr, 4)),
1439 pkt_len + 2);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001440 skb = mdp->rx_skbuff[entry];
1441 mdp->rx_skbuff[entry] = NULL;
Magnus Damm503914c2009-12-15 21:16:55 -08001442 if (mdp->cd->rpadir)
1443 skb_reserve(skb, NET_IP_ALIGN);
Kouei Abe7db8e0c2013-08-30 12:41:07 +09001444 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
Mitsuhiro Kimura4d6a9492014-11-27 20:34:00 +09001445 ALIGN(mdp->rx_buf_sz, 16),
Kouei Abe7db8e0c2013-08-30 12:41:07 +09001446 DMA_FROM_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001447 skb_put(skb, pkt_len);
1448 skb->protocol = eth_type_trans(skb, ndev);
Sergei Shtylyova8e9fd02013-09-03 03:03:10 +04001449 netif_receive_skb(skb);
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001450 ndev->stats.rx_packets++;
1451 ndev->stats.rx_bytes += pkt_len;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001452 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001453 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
Yoshihiro Shimoda862df492009-05-24 23:53:40 +00001454 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001455 }
1456
1457 /* Refill the Rx ring buffers. */
1458 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001459 entry = mdp->dirty_rx % mdp->num_rx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001460 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001461 /* The buffer length is aligned to a 16-byte boundary. */
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001462 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001463
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001464 if (mdp->rx_skbuff[entry] == NULL) {
Mitsuhiro Kimura4d6a9492014-11-27 20:34:00 +09001465 skb = netdev_alloc_skb(ndev, skbuff_size);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001466 mdp->rx_skbuff[entry] = skb;
1467 if (skb == NULL)
1468 break; /* Better luck next round. */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001469 sh_eth_set_receive_align(skb);
Mitsuhiro Kimura4d6a9492014-11-27 20:34:00 +09001470 dma_map_single(&ndev->dev, skb->data,
1471 rxdesc->buffer_length, DMA_FROM_DEVICE);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001472
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001473 skb_checksum_none_assert(skb);
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001474 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001475 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001476 if (entry >= mdp->num_rx_ring - 1)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001477 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001478 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001479 else
1480 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001481 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001482 }
1483
1484 /* Restart Rx engine if stopped. */
1485 /* If we don't need to check status, don't. -KDU */
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001486 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
Yoshihiro Shimodaa18e08b2012-06-20 15:26:34 +00001487 /* fix the values for the next receiving if RDE is set */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001488 if (intr_status & EESR_RDE) {
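 /* RX descriptors are 16 bytes each, so (RDFAR - RDLAR) >> 4 is the
  * index of the descriptor the controller fetches next; resync
  * cur_rx and dirty_rx to it.
  */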
1489 u32 count = (sh_eth_read(ndev, RDFAR) -
1490 sh_eth_read(ndev, RDLAR)) >> 4;
1491
1492 mdp->cur_rx = count;
1493 mdp->dirty_rx = count;
1494 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001495 sh_eth_write(ndev, EDRRR_R, EDRRR);
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001496 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001497
Yoshihiro Shimoda4f809ce2014-06-10 09:40:14 +09001498 return *quota <= 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001499}
1500
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001501static void sh_eth_rcv_snd_disable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001502{
1503 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001504 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1505 ~(ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001506}
1507
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001508static void sh_eth_rcv_snd_enable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001509{
1510 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001511 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1512 (ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001513}
1514
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001515/* error control function */
1516static void sh_eth_error(struct net_device *ndev, int intr_status)
1517{
1518 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001519 u32 felic_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001520 u32 link_stat;
1521 u32 mask;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001522
1523 if (intr_status & EESR_ECI) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001524 felic_stat = sh_eth_read(ndev, ECSR);
1525 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001526 if (felic_stat & ECSR_ICD)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001527 ndev->stats.tx_carrier_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001528 if (felic_stat & ECSR_LCHNG) {
1529 /* Link Changed */
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001530 if (mdp->cd->no_psr || mdp->no_ether_link) {
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001531 goto ignore_link;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001532 } else {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001533 link_stat = (sh_eth_read(ndev, PSR));
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001534 if (mdp->ether_link_active_low)
1535 link_stat = ~link_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001536 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001537 if (!(link_stat & PHY_ST_LINK)) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001538 sh_eth_rcv_snd_disable(ndev);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001539 } else {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001540 /* Link Up */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001541 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001542 ~DMAC_M_ECI, EESIPR);
1543 /* clear int */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001544 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001545 ECSR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001546 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001547 DMAC_M_ECI, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001548 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001549 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001550 }
1551 }
1552 }
1553
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001554ignore_link:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001555 if (intr_status & EESR_TWB) {
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001556 /* Unused write back interrupt */
1557 if (intr_status & EESR_TABT) { /* Transmit Abort int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001558 ndev->stats.tx_aborted_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001559 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001560 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001561 }
1562
1563 if (intr_status & EESR_RABT) {
1564 /* Receive Abort int */
1565 if (intr_status & EESR_RFRMER) {
1566 /* Receive Frame Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001567 ndev->stats.rx_frame_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001568 netif_err(mdp, rx_err, ndev, "Receive Abort\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001569 }
1570 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001571
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001572 if (intr_status & EESR_TDE) {
1573 /* Transmit Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001574 ndev->stats.tx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001575 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001576 }
1577
1578 if (intr_status & EESR_TFE) {
 1579 /* Transmit FIFO underflow */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001580 ndev->stats.tx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001581 netif_err(mdp, tx_err, ndev, "Transmit FIFO Underflow\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001582 }
1583
1584 if (intr_status & EESR_RDE) {
1585 /* Receive Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001586 ndev->stats.rx_over_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001587 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001588 }
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001589
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001590 if (intr_status & EESR_RFE) {
1591 /* Receive FIFO Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001592 ndev->stats.rx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001593 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001594 }
1595
1596 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1597 /* Address Error */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001598 ndev->stats.tx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001599 netif_err(mdp, tx_err, ndev, "Address Error\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001600 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001601
1602 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1603 if (mdp->cd->no_ade)
1604 mask &= ~EESR_ADE;
1605 if (intr_status & mask) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001606 /* Tx error */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001607 u32 edtrr = sh_eth_read(ndev, EDTRR);
Sergei Shtylyov090d5602014-01-11 02:41:49 +03001608
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001609 /* dmesg */
Sergei Shtylyovda246852014-03-15 03:29:14 +03001610 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1611 intr_status, mdp->cur_tx, mdp->dirty_tx,
1612 (u32)ndev->state, edtrr);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001613 /* dirty buffer free */
1614 sh_eth_txfree(ndev);
1615
1616 /* SH7712 BUG */
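 /* If the error has cleared the transmit request bit in EDTRR,
  * write it back to restart the TX DMA.
  */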
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001617 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001618 /* tx dma start */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001619 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001620 }
1621 /* wakeup */
1622 netif_wake_queue(ndev);
1623 }
1624}
1625
1626static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1627{
1628 struct net_device *ndev = netdev;
1629 struct sh_eth_private *mdp = netdev_priv(ndev);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001630 struct sh_eth_cpu_data *cd = mdp->cd;
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001631 irqreturn_t ret = IRQ_NONE;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001632 unsigned long intr_status, intr_enable;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001633
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001634 spin_lock(&mdp->lock);
1635
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001636 /* Get interrupt status */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001637 intr_status = sh_eth_read(ndev, EESR);
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001638 /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1639 * enabled since it's the one that comes thru regardless of the mask,
1640 * and we need to fully handle it in sh_eth_error() in order to quench
1641 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1642 */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001643 intr_enable = sh_eth_read(ndev, EESIPR);
1644 intr_status &= intr_enable | DMAC_M_ECI;
1645 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001646 ret = IRQ_HANDLED;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001647 else
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001648 goto other_irq;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001649
Sergei Shtylyov37191092013-06-19 23:30:23 +04001650 if (intr_status & EESR_RX_CHECK) {
1651 if (napi_schedule_prep(&mdp->napi)) {
1652 /* Mask Rx interrupts */
1653 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1654 EESIPR);
1655 __napi_schedule(&mdp->napi);
1656 } else {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001657 netdev_warn(ndev,
1658 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1659 intr_status, intr_enable);
Sergei Shtylyov37191092013-06-19 23:30:23 +04001660 }
1661 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001662
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001663 /* Tx Check */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001664 if (intr_status & cd->tx_check) {
Sergei Shtylyov37191092013-06-19 23:30:23 +04001665 /* Clear Tx interrupts */
1666 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1667
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001668 sh_eth_txfree(ndev);
1669 netif_wake_queue(ndev);
1670 }
1671
Sergei Shtylyov37191092013-06-19 23:30:23 +04001672 if (intr_status & cd->eesr_err_check) {
1673 /* Clear error interrupts */
1674 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1675
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001676 sh_eth_error(ndev, intr_status);
Sergei Shtylyov37191092013-06-19 23:30:23 +04001677 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001678
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001679other_irq:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001680 spin_unlock(&mdp->lock);
1681
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001682 return ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001683}
1684
Sergei Shtylyov37191092013-06-19 23:30:23 +04001685static int sh_eth_poll(struct napi_struct *napi, int budget)
1686{
1687 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1688 napi);
1689 struct net_device *ndev = napi->dev;
1690 int quota = budget;
1691 unsigned long intr_status;
1692
1693 for (;;) {
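 /* Drain RX work until the controller stops asserting EESR_RX_CHECK
  * or the NAPI quota runs out.
  */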
1694 intr_status = sh_eth_read(ndev, EESR);
1695 if (!(intr_status & EESR_RX_CHECK))
1696 break;
1697 /* Clear Rx interrupts */
1698 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1699
1700 if (sh_eth_rx(ndev, intr_status, &quota))
1701 goto out;
1702 }
1703
1704 napi_complete(napi);
1705
1706 /* Reenable Rx interrupts */
1707 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1708out:
1709 return budget - quota;
1710}
1711
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001712/* PHY state control function */
1713static void sh_eth_adjust_link(struct net_device *ndev)
1714{
1715 struct sh_eth_private *mdp = netdev_priv(ndev);
1716 struct phy_device *phydev = mdp->phydev;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001717 int new_state = 0;
1718
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001719 if (phydev->link) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001720 if (phydev->duplex != mdp->duplex) {
1721 new_state = 1;
1722 mdp->duplex = phydev->duplex;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001723 if (mdp->cd->set_duplex)
1724 mdp->cd->set_duplex(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001725 }
1726
1727 if (phydev->speed != mdp->speed) {
1728 new_state = 1;
1729 mdp->speed = phydev->speed;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001730 if (mdp->cd->set_rate)
1731 mdp->cd->set_rate(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001732 }
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001733 if (!mdp->link) {
Yoshihiro Shimoda91a56152011-07-05 20:33:51 +00001734 sh_eth_write(ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001735 sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1736 ECMR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001737 new_state = 1;
1738 mdp->link = phydev->link;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001739 if (mdp->cd->no_psr || mdp->no_ether_link)
1740 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001741 }
1742 } else if (mdp->link) {
1743 new_state = 1;
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001744 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001745 mdp->speed = 0;
1746 mdp->duplex = -1;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001747 if (mdp->cd->no_psr || mdp->no_ether_link)
1748 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001749 }
1750
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001751 if (new_state && netif_msg_link(mdp))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001752 phy_print_status(phydev);
1753}
1754
1755/* PHY init function */
1756static int sh_eth_phy_init(struct net_device *ndev)
1757{
Ben Dooks702eca02014-03-12 17:47:40 +00001758 struct device_node *np = ndev->dev.parent->of_node;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001759 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001760 struct phy_device *phydev = NULL;
1761
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001762 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001763 mdp->speed = 0;
1764 mdp->duplex = -1;
1765
1766 /* Try connect to PHY */
Ben Dooks702eca02014-03-12 17:47:40 +00001767 if (np) {
1768 struct device_node *pn;
1769
1770 pn = of_parse_phandle(np, "phy-handle", 0);
1771 phydev = of_phy_connect(ndev, pn,
1772 sh_eth_adjust_link, 0,
1773 mdp->phy_interface);
1774
1775 if (!phydev)
1776 phydev = ERR_PTR(-ENOENT);
1777 } else {
1778 char phy_id[MII_BUS_ID_SIZE + 3];
1779
1780 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1781 mdp->mii_bus->id, mdp->phy_id);
1782
1783 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1784 mdp->phy_interface);
1785 }
1786
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001787 if (IS_ERR(phydev)) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001788 netdev_err(ndev, "failed to connect PHY\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001789 return PTR_ERR(phydev);
1790 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001791
Sergei Shtylyovda246852014-03-15 03:29:14 +03001792 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1793 phydev->addr, phydev->irq, phydev->drv->name);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001794
1795 mdp->phydev = phydev;
1796
1797 return 0;
1798}
1799
1800/* PHY control start function */
1801static int sh_eth_phy_start(struct net_device *ndev)
1802{
1803 struct sh_eth_private *mdp = netdev_priv(ndev);
1804 int ret;
1805
1806 ret = sh_eth_phy_init(ndev);
1807 if (ret)
1808 return ret;
1809
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001810 phy_start(mdp->phydev);
1811
1812 return 0;
1813}
1814
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001815static int sh_eth_get_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001816 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001817{
1818 struct sh_eth_private *mdp = netdev_priv(ndev);
1819 unsigned long flags;
1820 int ret;
1821
1822 spin_lock_irqsave(&mdp->lock, flags);
1823 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1824 spin_unlock_irqrestore(&mdp->lock, flags);
1825
1826 return ret;
1827}
1828
1829static int sh_eth_set_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001830 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001831{
1832 struct sh_eth_private *mdp = netdev_priv(ndev);
1833 unsigned long flags;
1834 int ret;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001835
1836 spin_lock_irqsave(&mdp->lock, flags);
1837
1838 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001839 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001840
1841 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1842 if (ret)
1843 goto error_exit;
1844
1845 if (ecmd->duplex == DUPLEX_FULL)
1846 mdp->duplex = 1;
1847 else
1848 mdp->duplex = 0;
1849
1850 if (mdp->cd->set_duplex)
1851 mdp->cd->set_duplex(ndev);
1852
1853error_exit:
1854 mdelay(1);
1855
1856 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001857 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001858
1859 spin_unlock_irqrestore(&mdp->lock, flags);
1860
1861 return ret;
1862}
1863
1864static int sh_eth_nway_reset(struct net_device *ndev)
1865{
1866 struct sh_eth_private *mdp = netdev_priv(ndev);
1867 unsigned long flags;
1868 int ret;
1869
1870 spin_lock_irqsave(&mdp->lock, flags);
1871 ret = phy_start_aneg(mdp->phydev);
1872 spin_unlock_irqrestore(&mdp->lock, flags);
1873
1874 return ret;
1875}
1876
1877static u32 sh_eth_get_msglevel(struct net_device *ndev)
1878{
1879 struct sh_eth_private *mdp = netdev_priv(ndev);
1880 return mdp->msg_enable;
1881}
1882
1883static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1884{
1885 struct sh_eth_private *mdp = netdev_priv(ndev);
1886 mdp->msg_enable = value;
1887}
1888
1889static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1890 "rx_current", "tx_current",
1891 "rx_dirty", "tx_dirty",
1892};
1893#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1894
1895static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1896{
1897 switch (sset) {
1898 case ETH_SS_STATS:
1899 return SH_ETH_STATS_LEN;
1900 default:
1901 return -EOPNOTSUPP;
1902 }
1903}
1904
1905static void sh_eth_get_ethtool_stats(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001906 struct ethtool_stats *stats, u64 *data)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001907{
1908 struct sh_eth_private *mdp = netdev_priv(ndev);
1909 int i = 0;
1910
1911 /* device-specific stats */
1912 data[i++] = mdp->cur_rx;
1913 data[i++] = mdp->cur_tx;
1914 data[i++] = mdp->dirty_rx;
1915 data[i++] = mdp->dirty_tx;
1916}
1917
1918static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1919{
1920 switch (stringset) {
1921 case ETH_SS_STATS:
1922 memcpy(data, *sh_eth_gstrings_stats,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001923 sizeof(sh_eth_gstrings_stats));
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001924 break;
1925 }
1926}
1927
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001928static void sh_eth_get_ringparam(struct net_device *ndev,
1929 struct ethtool_ringparam *ring)
1930{
1931 struct sh_eth_private *mdp = netdev_priv(ndev);
1932
1933 ring->rx_max_pending = RX_RING_MAX;
1934 ring->tx_max_pending = TX_RING_MAX;
1935 ring->rx_pending = mdp->num_rx_ring;
1936 ring->tx_pending = mdp->num_tx_ring;
1937}
1938
1939static int sh_eth_set_ringparam(struct net_device *ndev,
1940 struct ethtool_ringparam *ring)
1941{
1942 struct sh_eth_private *mdp = netdev_priv(ndev);
1943 int ret;
1944
1945 if (ring->tx_pending > TX_RING_MAX ||
1946 ring->rx_pending > RX_RING_MAX ||
1947 ring->tx_pending < TX_RING_MIN ||
1948 ring->rx_pending < RX_RING_MIN)
1949 return -EINVAL;
1950 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1951 return -EINVAL;
1952
1953 if (netif_running(ndev)) {
1954 netif_tx_disable(ndev);
1955 /* Disable interrupts by clearing the interrupt mask. */
1956 sh_eth_write(ndev, 0x0000, EESIPR);
1957 /* Stop the chip's Tx and Rx processes. */
1958 sh_eth_write(ndev, 0, EDTRR);
1959 sh_eth_write(ndev, 0, EDRRR);
1960 synchronize_irq(ndev->irq);
1961 }
1962
1963 /* Free all the skbuffs in the Rx queue. */
1964 sh_eth_ring_free(ndev);
1965 /* Free DMA buffer */
1966 sh_eth_free_dma_buffer(mdp);
1967
1968 /* Set new parameters */
1969 mdp->num_rx_ring = ring->rx_pending;
1970 mdp->num_tx_ring = ring->tx_pending;
1971
1972 ret = sh_eth_ring_init(ndev);
1973 if (ret < 0) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001974 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001975 return ret;
1976 }
1977 ret = sh_eth_dev_init(ndev, false);
1978 if (ret < 0) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001979 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001980 return ret;
1981 }
1982
1983 if (netif_running(ndev)) {
1984 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1985 /* Setting the Rx mode will start the Rx process. */
1986 sh_eth_write(ndev, EDRRR_R, EDRRR);
1987 netif_wake_queue(ndev);
1988 }
1989
1990 return 0;
1991}
1992
stephen hemminger9b07be42012-01-04 12:59:49 +00001993static const struct ethtool_ops sh_eth_ethtool_ops = {
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001994 .get_settings = sh_eth_get_settings,
1995 .set_settings = sh_eth_set_settings,
stephen hemminger9b07be42012-01-04 12:59:49 +00001996 .nway_reset = sh_eth_nway_reset,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001997 .get_msglevel = sh_eth_get_msglevel,
1998 .set_msglevel = sh_eth_set_msglevel,
stephen hemminger9b07be42012-01-04 12:59:49 +00001999 .get_link = ethtool_op_get_link,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002000 .get_strings = sh_eth_get_strings,
2001 .get_ethtool_stats = sh_eth_get_ethtool_stats,
2002 .get_sset_count = sh_eth_get_sset_count,
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002003 .get_ringparam = sh_eth_get_ringparam,
2004 .set_ringparam = sh_eth_set_ringparam,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002005};
2006
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002007/* network device open function */
2008static int sh_eth_open(struct net_device *ndev)
2009{
2010 int ret = 0;
2011 struct sh_eth_private *mdp = netdev_priv(ndev);
2012
Magnus Dammbcd51492009-10-09 00:20:04 +00002013 pm_runtime_get_sync(&mdp->pdev->dev);
2014
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002015 napi_enable(&mdp->napi);
2016
Joe Perchesa0607fd2009-11-18 23:29:17 -08002017 ret = request_irq(ndev->irq, sh_eth_interrupt,
Nobuhiro Iwamatsu5b3dfd12013-06-06 09:49:30 +00002018 mdp->cd->irq_flags, ndev->name, ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002019 if (ret) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03002020 netdev_err(ndev, "Can not assign IRQ number\n");
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002021 goto out_napi_off;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002022 }
2023
 2024 /* Set up the descriptor rings */
2025 ret = sh_eth_ring_init(ndev);
2026 if (ret)
2027 goto out_free_irq;
2028
2029 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002030 ret = sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002031 if (ret)
2032 goto out_free_irq;
2033
 2034 /* PHY control start */
2035 ret = sh_eth_phy_start(ndev);
2036 if (ret)
2037 goto out_free_irq;
2038
Mitsuhiro Kimura7fa29552014-11-28 10:04:15 +09002039 mdp->is_opened = 1;
2040
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002041 return ret;
2042
2043out_free_irq:
2044 free_irq(ndev->irq, ndev);
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002045out_napi_off:
2046 napi_disable(&mdp->napi);
Magnus Dammbcd51492009-10-09 00:20:04 +00002047 pm_runtime_put_sync(&mdp->pdev->dev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002048 return ret;
2049}
2050
2051/* Timeout function */
2052static void sh_eth_tx_timeout(struct net_device *ndev)
2053{
2054 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002055 struct sh_eth_rxdesc *rxdesc;
2056 int i;
2057
2058 netif_stop_queue(ndev);
2059
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03002060 netif_err(mdp, timer, ndev,
2061 "transmit timed out, status %8.8x, resetting...\n",
2062 (int)sh_eth_read(ndev, EESR));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002063
2064 /* tx_errors count up */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002065 ndev->stats.tx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002066
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002067 /* Free all the skbuffs in the Rx queue. */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002068 for (i = 0; i < mdp->num_rx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002069 rxdesc = &mdp->rx_ring[i];
2070 rxdesc->status = 0;
2071 rxdesc->addr = 0xBADF00D0;
Sergei Shtylyov179d80a2014-06-28 04:10:00 +04002072 dev_kfree_skb(mdp->rx_skbuff[i]);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002073 mdp->rx_skbuff[i] = NULL;
2074 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002075 for (i = 0; i < mdp->num_tx_ring; i++) {
Sergei Shtylyov179d80a2014-06-28 04:10:00 +04002076 dev_kfree_skb(mdp->tx_skbuff[i]);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002077 mdp->tx_skbuff[i] = NULL;
2078 }
2079
2080 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002081 sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002082}
2083
2084/* Packet transmit function */
2085static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2086{
2087 struct sh_eth_private *mdp = netdev_priv(ndev);
2088 struct sh_eth_txdesc *txdesc;
2089 u32 entry;
Nobuhiro Iwamatsufb5e2f92008-11-17 20:29:58 +00002090 unsigned long flags;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002091
2092 spin_lock_irqsave(&mdp->lock, flags);
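 /* Keep a few TX descriptors in reserve; reclaim completed ones
  * before treating the ring as full.
  */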
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002093 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002094 if (!sh_eth_txfree(ndev)) {
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03002095 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002096 netif_stop_queue(ndev);
2097 spin_unlock_irqrestore(&mdp->lock, flags);
Patrick McHardy5b548142009-06-12 06:22:29 +00002098 return NETDEV_TX_BUSY;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002099 }
2100 }
2101 spin_unlock_irqrestore(&mdp->lock, flags);
2102
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002103 entry = mdp->cur_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002104 mdp->tx_skbuff[entry] = skb;
2105 txdesc = &mdp->tx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002106 /* soft swap. */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002107 if (!mdp->cd->hw_swap)
2108 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2109 skb->len + 2);
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00002110 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2111 DMA_TO_DEVICE);
Sergei Shtylyov730c8c62014-02-14 03:05:42 +03002112 if (skb->len < ETH_ZLEN)
2113 txdesc->buffer_length = ETH_ZLEN;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002114 else
2115 txdesc->buffer_length = skb->len;
2116
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002117 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04002118 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002119 else
Yoshinori Sato71557a32008-08-06 19:49:00 -04002120 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002121
2122 mdp->cur_tx++;
2123
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002124 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2125 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09002126
Patrick McHardy6ed10652009-06-23 06:03:08 +00002127 return NETDEV_TX_OK;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002128}
2129
Mitsuhiro Kimura7fa29552014-11-28 10:04:15 +09002130static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2131{
2132 struct sh_eth_private *mdp = netdev_priv(ndev);
2133
2134 if (sh_eth_is_rz_fast_ether(mdp))
2135 return &ndev->stats;
2136
2137 if (!mdp->is_opened)
2138 return &ndev->stats;
2139
2140 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2141 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2142 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2143 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2144 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2145 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2146
2147 if (sh_eth_is_gether(mdp)) {
2148 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2149 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2150 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2151 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2152 } else {
2153 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2154 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2155 }
2156
2157 return &ndev->stats;
2158}
2159
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002160/* device close function */
2161static int sh_eth_close(struct net_device *ndev)
2162{
2163 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002164
2165 netif_stop_queue(ndev);
2166
2167 /* Disable interrupts by clearing the interrupt mask. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002168 sh_eth_write(ndev, 0x0000, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002169
2170 /* Stop the chip's Tx and Rx processes. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002171 sh_eth_write(ndev, 0, EDTRR);
2172 sh_eth_write(ndev, 0, EDRRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002173
Mitsuhiro Kimura7fa29552014-11-28 10:04:15 +09002174 sh_eth_get_stats(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002175 /* PHY Disconnect */
2176 if (mdp->phydev) {
2177 phy_stop(mdp->phydev);
2178 phy_disconnect(mdp->phydev);
2179 }
2180
2181 free_irq(ndev->irq, ndev);
2182
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002183 napi_disable(&mdp->napi);
2184
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002185 /* Free all the skbuffs in the Rx queue. */
2186 sh_eth_ring_free(ndev);
2187
2188 /* free DMA buffer */
Yoshihiro Shimoda91c77552012-06-26 20:00:01 +00002189 sh_eth_free_dma_buffer(mdp);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002190
Magnus Dammbcd51492009-10-09 00:20:04 +00002191 pm_runtime_put_sync(&mdp->pdev->dev);
2192
Mitsuhiro Kimura7fa29552014-11-28 10:04:15 +09002193 mdp->is_opened = 0;
2194
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002195 return 0;
2196}
2197
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002198/* ioctl to device function */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002199static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002200{
2201 struct sh_eth_private *mdp = netdev_priv(ndev);
2202 struct phy_device *phydev = mdp->phydev;
2203
2204 if (!netif_running(ndev))
2205 return -EINVAL;
2206
2207 if (!phydev)
2208 return -ENODEV;
2209
Richard Cochran28b04112010-07-17 08:48:55 +00002210 return phy_mii_ioctl(phydev, rq, cmd);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002211}
2212
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002213 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
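 /* Each TSU_POSTn register packs eight 4-bit fields, one per CAM entry,
  * most significant field first; the bits within a field select which
  * port(s) use that entry.
  */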
2214static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2215 int entry)
2216{
2217 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2218}
2219
2220static u32 sh_eth_tsu_get_post_mask(int entry)
2221{
2222 return 0x0f << (28 - ((entry % 8) * 4));
2223}
2224
2225static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2226{
2227 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2228}
2229
2230static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2231 int entry)
2232{
2233 struct sh_eth_private *mdp = netdev_priv(ndev);
2234 u32 tmp;
2235 void *reg_offset;
2236
2237 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2238 tmp = ioread32(reg_offset);
2239 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2240}
2241
2242static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2243 int entry)
2244{
2245 struct sh_eth_private *mdp = netdev_priv(ndev);
2246 u32 post_mask, ref_mask, tmp;
2247 void *reg_offset;
2248
2249 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2250 post_mask = sh_eth_tsu_get_post_mask(entry);
2251 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2252
2253 tmp = ioread32(reg_offset);
2254 iowrite32(tmp & ~post_mask, reg_offset);
2255
 2256 /* If the other port still enables the entry, the function returns "true" */
2257 return tmp & ref_mask;
2258}
2259
2260static int sh_eth_tsu_busy(struct net_device *ndev)
2261{
2262 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2263 struct sh_eth_private *mdp = netdev_priv(ndev);
2264
2265 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2266 udelay(10);
2267 timeout--;
2268 if (timeout <= 0) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03002269 netdev_err(ndev, "%s: timeout\n", __func__);
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002270 return -ETIMEDOUT;
2271 }
2272 }
2273
2274 return 0;
2275}
2276
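 /* A CAM entry is a register pair: the first register holds the upper
  * four bytes of the MAC address, the second the lower two bytes.
  */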
2277static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2278 const u8 *addr)
2279{
2280 u32 val;
2281
2282 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2283 iowrite32(val, reg);
2284 if (sh_eth_tsu_busy(ndev) < 0)
2285 return -EBUSY;
2286
2287 val = addr[4] << 8 | addr[5];
2288 iowrite32(val, reg + 4);
2289 if (sh_eth_tsu_busy(ndev) < 0)
2290 return -EBUSY;
2291
2292 return 0;
2293}
2294
2295static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2296{
2297 u32 val;
2298
2299 val = ioread32(reg);
2300 addr[0] = (val >> 24) & 0xff;
2301 addr[1] = (val >> 16) & 0xff;
2302 addr[2] = (val >> 8) & 0xff;
2303 addr[3] = val & 0xff;
2304 val = ioread32(reg + 4);
2305 addr[4] = (val >> 8) & 0xff;
2306 addr[5] = val & 0xff;
2307}
2308
2309
2310static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2311{
2312 struct sh_eth_private *mdp = netdev_priv(ndev);
2313 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2314 int i;
2315 u8 c_addr[ETH_ALEN];
2316
2317 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2318 sh_eth_tsu_read_entry(reg_offset, c_addr);
dingtianhongc4bde292013-12-30 15:41:17 +08002319 if (ether_addr_equal(addr, c_addr))
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002320 return i;
2321 }
2322
2323 return -ENOENT;
2324}
2325
2326static int sh_eth_tsu_find_empty(struct net_device *ndev)
2327{
2328 u8 blank[ETH_ALEN];
2329 int entry;
2330
2331 memset(blank, 0, sizeof(blank));
2332 entry = sh_eth_tsu_find_entry(ndev, blank);
2333 return (entry < 0) ? -ENOMEM : entry;
2334}
2335
2336static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2337 int entry)
2338{
2339 struct sh_eth_private *mdp = netdev_priv(ndev);
2340 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2341 int ret;
2342 u8 blank[ETH_ALEN];
2343
2344 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2345 ~(1 << (31 - entry)), TSU_TEN);
2346
2347 memset(blank, 0, sizeof(blank));
2348 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2349 if (ret < 0)
2350 return ret;
2351 return 0;
2352}
2353
2354static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2355{
2356 struct sh_eth_private *mdp = netdev_priv(ndev);
2357 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2358 int i, ret;
2359
2360 if (!mdp->cd->tsu)
2361 return 0;
2362
2363 i = sh_eth_tsu_find_entry(ndev, addr);
2364 if (i < 0) {
2365 /* No entry found, create one */
2366 i = sh_eth_tsu_find_empty(ndev);
2367 if (i < 0)
2368 return -ENOMEM;
2369 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2370 if (ret < 0)
2371 return ret;
2372
2373 /* Enable the entry */
2374 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2375 (1 << (31 - i)), TSU_TEN);
2376 }
2377
2378 /* Entry found or created, enable POST */
2379 sh_eth_tsu_enable_cam_entry_post(ndev, i);
2380
2381 return 0;
2382}
2383
2384static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2385{
2386 struct sh_eth_private *mdp = netdev_priv(ndev);
2387 int i, ret;
2388
2389 if (!mdp->cd->tsu)
2390 return 0;
2391
2392 i = sh_eth_tsu_find_entry(ndev, addr);
2393 if (i) {
2394 /* Entry found */
2395 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2396 goto done;
2397
 2398 /* Disable the entry if both ports were disabled */
2399 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2400 if (ret < 0)
2401 return ret;
2402 }
2403done:
2404 return 0;
2405}
2406
2407static int sh_eth_tsu_purge_all(struct net_device *ndev)
2408{
2409 struct sh_eth_private *mdp = netdev_priv(ndev);
2410 int i, ret;
2411
2412 if (unlikely(!mdp->cd->tsu))
2413 return 0;
2414
2415 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2416 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2417 continue;
2418
 2419 /* Disable the entry if both ports were disabled */
2420 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2421 if (ret < 0)
2422 return ret;
2423 }
2424
2425 return 0;
2426}
2427
2428static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2429{
2430 struct sh_eth_private *mdp = netdev_priv(ndev);
2431 u8 addr[ETH_ALEN];
2432 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2433 int i;
2434
2435 if (unlikely(!mdp->cd->tsu))
2436 return;
2437
2438 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2439 sh_eth_tsu_read_entry(reg_offset, addr);
2440 if (is_multicast_ether_addr(addr))
2441 sh_eth_tsu_del_entry(ndev, addr);
2442 }
2443}
2444
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002445 /* Set the multicast reception mode */
2446static void sh_eth_set_multicast_list(struct net_device *ndev)
2447{
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002448 struct sh_eth_private *mdp = netdev_priv(ndev);
2449 u32 ecmr_bits;
2450 int mcast_all = 0;
2451 unsigned long flags;
2452
2453 spin_lock_irqsave(&mdp->lock, flags);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002454 /* Initial condition is MCT = 1, PRM = 0.
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002455 * Depending on ndev->flags, set PRM or clear MCT
2456 */
2457 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2458
2459 if (!(ndev->flags & IFF_MULTICAST)) {
2460 sh_eth_tsu_purge_mcast(ndev);
2461 mcast_all = 1;
2462 }
2463 if (ndev->flags & IFF_ALLMULTI) {
2464 sh_eth_tsu_purge_mcast(ndev);
2465 ecmr_bits &= ~ECMR_MCT;
2466 mcast_all = 1;
2467 }
2468
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002469 if (ndev->flags & IFF_PROMISC) {
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002470 sh_eth_tsu_purge_all(ndev);
2471 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2472 } else if (mdp->cd->tsu) {
2473 struct netdev_hw_addr *ha;
2474 netdev_for_each_mc_addr(ha, ndev) {
2475 if (mcast_all && is_multicast_ether_addr(ha->addr))
2476 continue;
2477
2478 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2479 if (!mcast_all) {
2480 sh_eth_tsu_purge_mcast(ndev);
2481 ecmr_bits &= ~ECMR_MCT;
2482 mcast_all = 1;
2483 }
2484 }
2485 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002486 } else {
2487 /* Normal, unicast/broadcast-only mode. */
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002488 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002489 }
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002490
2491 /* update the ethernet mode */
2492 sh_eth_write(ndev, ecmr_bits, ECMR);
2493
2494 spin_unlock_irqrestore(&mdp->lock, flags);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002495}
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002496
2497static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2498{
2499 if (!mdp->port)
2500 return TSU_VTAG0;
2501 else
2502 return TSU_VTAG1;
2503}
2504
Patrick McHardy80d5c362013-04-19 02:04:28 +00002505static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2506 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002507{
2508 struct sh_eth_private *mdp = netdev_priv(ndev);
2509 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2510
2511 if (unlikely(!mdp->cd->tsu))
2512 return -EPERM;
2513
2514 /* No filtering if vid = 0 */
2515 if (!vid)
2516 return 0;
2517
2518 mdp->vlan_num_ids++;
2519
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002520 /* The controller has one VLAN tag HW filter. So, if the filter is
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002521 * already enabled, the driver disables it and accepts all VLAN tags
 2522 */
2523 if (mdp->vlan_num_ids > 1) {
2524 /* disable VLAN filter */
2525 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2526 return 0;
2527 }
2528
2529 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2530 vtag_reg_index);
2531
2532 return 0;
2533}
2534
Patrick McHardy80d5c362013-04-19 02:04:28 +00002535static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2536 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002537{
2538 struct sh_eth_private *mdp = netdev_priv(ndev);
2539 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2540
2541 if (unlikely(!mdp->cd->tsu))
2542 return -EPERM;
2543
2544 /* No filtering if vid = 0 */
2545 if (!vid)
2546 return 0;
2547
2548 mdp->vlan_num_ids--;
2549 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2550
2551 return 0;
2552}
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002553
2554/* SuperH's TSU register init function */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002555static void sh_eth_tsu_init(struct sh_eth_private *mdp)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002556{
Simon Hormandb893472014-01-17 09:22:28 +09002557 if (sh_eth_is_rz_fast_ether(mdp)) {
2558 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2559 return;
2560 }
2561
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002562 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2563 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2564 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2565 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2566 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2567 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2568 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2569 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2570 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2571 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002572 if (sh_eth_is_gether(mdp)) {
2573 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2574 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2575 } else {
2576 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2577 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2578 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002579 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
2580 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
2581 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2582 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
2583 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
2584 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
2585 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002586}
2587
2588/* MDIO bus release function */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002589static int sh_mdio_release(struct sh_eth_private *mdp)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002590{
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002591 /* unregister mdio bus */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002592 mdiobus_unregister(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002593
2594 /* free bitbang info */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002595 free_mdio_bitbang(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002596
2597 return 0;
2598}
2599
2600/* MDIO bus init function */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002601static int sh_mdio_init(struct sh_eth_private *mdp,
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002602 struct sh_eth_plat_data *pd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002603{
2604 int ret, i;
2605 struct bb_info *bitbang;
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002606 struct platform_device *pdev = mdp->pdev;
Laurent Pinchartaa8d4222014-03-20 15:00:31 +01002607 struct device *dev = &mdp->pdev->dev;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002608
2609 /* create bit control struct for PHY */
Laurent Pinchartaa8d4222014-03-20 15:00:31 +01002610 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
Laurent Pinchartf738a132014-03-20 15:00:35 +01002611 if (!bitbang)
2612 return -ENOMEM;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002613
2614 /* bitbang init */
Yoshihiro Shimodaae706442011-09-27 21:48:58 +00002615 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002616 bitbang->set_gate = pd->set_mdio_gate;
Sergei Shtylyovdfed5e72013-03-21 10:37:54 +00002617 bitbang->mdi_msk = PIR_MDI;
2618 bitbang->mdo_msk = PIR_MDO;
2619 bitbang->mmd_msk = PIR_MMD;
2620 bitbang->mdc_msk = PIR_MDC;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002621 bitbang->ctrl.ops = &bb_ops;
2622
Stefan Weilc2e07b32010-08-03 19:44:52 +02002623 /* MII controller setting */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002624 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
Laurent Pinchartf738a132014-03-20 15:00:35 +01002625 if (!mdp->mii_bus)
2626 return -ENOMEM;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002627
2628 /* Hook up MII support for ethtool */
2629 mdp->mii_bus->name = "sh_mii";
Laurent Pincharta5bd60602014-03-20 15:00:32 +01002630 mdp->mii_bus->parent = dev;
Florian Fainelli5278fb52012-01-09 23:59:17 +00002631 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002632 pdev->name, pdev->id);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002633
2634 /* PHY IRQ */
Sergei Shtylyov86b5d252014-05-13 02:30:14 +04002635 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2636 GFP_KERNEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002637 if (!mdp->mii_bus->irq) {
2638 ret = -ENOMEM;
2639 goto out_free_bus;
2640 }
2641
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002642 /* register MDIO bus */
2643 if (dev->of_node) {
2644 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
Ben Dooks702eca02014-03-12 17:47:40 +00002645 } else {
2646 for (i = 0; i < PHY_MAX_ADDR; i++)
2647 mdp->mii_bus->irq[i] = PHY_POLL;
2648 if (pd->phy_irq > 0)
2649 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2650
2651 ret = mdiobus_register(mdp->mii_bus);
2652 }
2653
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002654 if (ret)
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002655 goto out_free_bus;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002656
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002657 return 0;
2658
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002659out_free_bus:
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002660 free_mdio_bitbang(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002661 return ret;
2662}
2663
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002664static const u16 *sh_eth_get_register_offset(int register_type)
2665{
2666 const u16 *reg_offset = NULL;
2667
2668 switch (register_type) {
2669 case SH_ETH_REG_GIGABIT:
2670 reg_offset = sh_eth_offset_gigabit;
2671 break;
Simon Hormandb893472014-01-17 09:22:28 +09002672 case SH_ETH_REG_FAST_RZ:
2673 reg_offset = sh_eth_offset_fast_rz;
2674 break;
Sergei Shtylyova3f109b2013-03-28 11:51:31 +00002675 case SH_ETH_REG_FAST_RCAR:
2676 reg_offset = sh_eth_offset_fast_rcar;
2677 break;
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002678 case SH_ETH_REG_FAST_SH4:
2679 reg_offset = sh_eth_offset_fast_sh4;
2680 break;
2681 case SH_ETH_REG_FAST_SH3_SH2:
2682 reg_offset = sh_eth_offset_fast_sh3_sh2;
2683 break;
2684 default:
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002685 break;
2686 }
2687
2688 return reg_offset;
2689}
2690
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002691static const struct net_device_ops sh_eth_netdev_ops = {
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002692 .ndo_open = sh_eth_open,
2693 .ndo_stop = sh_eth_close,
2694 .ndo_start_xmit = sh_eth_start_xmit,
2695 .ndo_get_stats = sh_eth_get_stats,
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002696 .ndo_tx_timeout = sh_eth_tx_timeout,
2697 .ndo_do_ioctl = sh_eth_do_ioctl,
2698 .ndo_validate_addr = eth_validate_addr,
2699 .ndo_set_mac_address = eth_mac_addr,
2700 .ndo_change_mtu = eth_change_mtu,
2701};
2702
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002703static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2704 .ndo_open = sh_eth_open,
2705 .ndo_stop = sh_eth_close,
2706 .ndo_start_xmit = sh_eth_start_xmit,
2707 .ndo_get_stats = sh_eth_get_stats,
2708 .ndo_set_rx_mode = sh_eth_set_multicast_list,
2709 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
2710 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
2711 .ndo_tx_timeout = sh_eth_tx_timeout,
2712 .ndo_do_ioctl = sh_eth_do_ioctl,
2713 .ndo_validate_addr = eth_validate_addr,
2714 .ndo_set_mac_address = eth_mac_addr,
2715 .ndo_change_mtu = eth_change_mtu,
2716};
2717
Sergei Shtylyovb356e972014-02-18 03:12:43 +03002718#ifdef CONFIG_OF
2719static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2720{
2721 struct device_node *np = dev->of_node;
2722 struct sh_eth_plat_data *pdata;
Sergei Shtylyovb356e972014-02-18 03:12:43 +03002723 const char *mac_addr;
2724
2725 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2726 if (!pdata)
2727 return NULL;
2728
2729 pdata->phy_interface = of_get_phy_mode(np);
2730
Sergei Shtylyovb356e972014-02-18 03:12:43 +03002731 mac_addr = of_get_mac_address(np);
2732 if (mac_addr)
2733 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2734
2735 pdata->no_ether_link =
2736 of_property_read_bool(np, "renesas,no-ether-link");
2737 pdata->ether_link_active_low =
2738 of_property_read_bool(np, "renesas,ether-link-active-low");
2739
2740 return pdata;
2741}
2742
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif

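/* Probe: map the controller registers, pick the per-SoC sh_eth_cpu_data
 * from either the platform device ID table or the OF match table, set up
 * the MDIO bus and NAPI context, then register the net_device. Error
 * paths unwind through out_napi_del/out_release in reverse order.
 *
 * For non-DT platforms, board code is expected to supply platform data;
 * an illustrative (hypothetical) example:
 *
 *	static struct sh_eth_plat_data my_eth_pdata = {
 *		.phy		= 0x01,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *	};
 */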
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

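	/* Keep the device runtime-resumed (clocked) for the duration of
	 * probe; the reference is dropped again just before returning.
	 */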
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* MDIO bus init; report the error against the platform device,
	 * since the net_device is not registered (and named) yet.
	 */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

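/* Remove: tear down in the reverse order of probe. The MMIO mappings were
 * created with devm_ioremap_resource() and are released automatically by
 * the driver core.
 */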
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

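/* Runtime PM callbacks are only wired up when CONFIG_PM is enabled;
 * otherwise SH_ETH_PM_OPS resolves to NULL and the driver registers
 * without PM operations.
 */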
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

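/* Legacy (non-DT) platform device names, each mapped to the matching
 * per-SoC sh_eth_cpu_data via the driver_data field.
 */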
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);

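/* The driver binds either by platform device ID (legacy board files) or
 * by OF compatible string; of_match_ptr() compiles the DT table out when
 * CONFIG_OF is disabled.
 */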
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		.name = CARDNAME,
		.pm = SH_ETH_PM_OPS,
		.of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");