/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2013 Renesas Solutions Corp.
 *  Copyright (C) 2013 Cogent Embedded, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

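/*
 * Per-SoC register offset tables. The register enum values from sh_eth.h
 * index these arrays, and the I/O accessors presumably resolve a register
 * as something like ioread32(mdp->addr + mdp->reg_offset[reg]) (a sketch
 * of the indirection, not a quote of sh_eth.h), so the same register name
 * can live at a different offset on each SoC family.
 */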
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up; defaulting to MII\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
#endif

/* There is CPU-dependent code below */
#if defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7779 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
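/*
 * Note on rpadir_value: RPADIR is the receive padding register; a value of
 * 0x00020000 (i.e. 2 << 16, as the other variants spell it) asks the
 * controller to insert 2 bytes of padding ahead of each received frame so
 * the IP header lands 4-byte aligned, matching the NET_IP_ALIGN
 * skb_reserve() done on the Rx path.
 */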
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

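/*
 * The ARSTR write above resets the whole GETHER block, including the MAC
 * address registers, which is why sh_eth_chip_reset_giga() saves MAHR/MALR
 * for both ports around the reset and writes them back afterwards.
 */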
static int sh_eth_is_gether(struct sh_eth_private *mdp);
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

out:
	return ret;
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
	.hw_crc		= 1,
	.select_mii	= 1,
#endif
};

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

	/* Reset HW CRC register */
	sh_eth_reset_hw_crc(ndev);

	/* Select MII mode */
	if (sh_eth_my_cpu_data.select_mii)
		sh_eth_select_mii(ndev);
out:
	return ret;
}

static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
	if (sh_eth_my_cpu_data.hw_crc)
		sh_eth_write(ndev, 0x0, CSMR);
}

#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

out:
	return ret;
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* R8A7740 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static int sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);

	return 0;
}
#else
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
#endif

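/*
 * sh_eth_check_reset() polls EDMR & 0x3, the software-reset field
 * (EDMR_SRST_ETHER/EDMR_SRST_GETHER), for up to 100 ms; the hardware
 * clears those bits when the soft reset completes. Note the original
 * "if (cnt < 0)" check above could never fire, since the loop exits
 * with cnt == 0 on timeout.
 */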
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

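/*
 * Example: on a big-endian SH CPU with EDMAC_LITTLE_ENDIAN platform data,
 * cpu_to_edmac(mdp, 0x80000000) byte-swaps the value to 0x00000080 before
 * it is stored in a descriptor, and edmac_to_cpu() undoes the swap on the
 * way back. The descriptor endianness comes from the platform data
 * (mdp->edmac_endian), not from the CPU.
 */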
/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet controller has no ROM for the MAC address; it relies
 * on the address programmed by the bootloader (U-Boot or sh-ipl+g). To use
 * this device, the bootloader must set a MAC address beforehand.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

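/*
 * MAHR holds the top four address bytes and MALR the bottom two: for
 * 02:00:11:22:33:44, update_mac_address() writes MAHR = 0x02001122 and
 * MALR = 0x00003344, and the reads above unpack the same layout.
 */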
static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

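/*
 * The MDIO bus is bit-banged through a single GPIO-like register
 * (typically PIR): the mmd/mdo/mdi/mdc masks in struct bb_info pick out
 * the direction, output, input and clock bits, and the mdio-bitbang
 * helper library drives the ops above to clock management frames out to
 * the PHY. set_gate() appears to be an optional hook for parts such as
 * SH7757 that must route the bus to the right block before touching the
 * pins.
 */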
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

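/*
 * After sh_eth_ring_format() the last Rx and Tx descriptors carry
 * RD_RDEL/TD_TDLE, which tell the DMA engine to wrap back to the ring
 * base, so the rest of the driver can walk the rings with simple modulo
 * arithmetic (entry = index % num_*_ring).
 */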
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
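	/*
	 * Worked example, assuming an MTU of 1500: 1500 + 26 = 1526,
	 * rounded up to a multiple of 8 gives 1528, and + 2 + 16 yields a
	 * 1546-byte Rx buffer. An MTU of 1492 or less uses PKT_BUF_SZ
	 * instead.
	 */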
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}

1387/* Packet receive function */
Yoshihiro Shimodaa18e08b2012-06-20 15:26:34 +00001388static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001389{
1390 struct sh_eth_private *mdp = netdev_priv(ndev);
1391 struct sh_eth_rxdesc *rxdesc;
1392
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001393 int entry = mdp->cur_rx % mdp->num_rx_ring;
1394 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001395 struct sk_buff *skb;
1396 u16 pkt_len = 0;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001397 u32 desc_status;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001398
1399 rxdesc = &mdp->rx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001400 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1401 desc_status = edmac_to_cpu(mdp, rxdesc->status);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001402 pkt_len = rxdesc->frame_length;
1403
#if defined(CONFIG_ARCH_R8A7740)
		desc_status >>= 16;
#endif

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer length is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				       DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receive if RDE is set */
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return 0;
}
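
/*
 * Illustration of the recovery arithmetic above (register values are
 * hypothetical): RDLAR holds the base address of the Rx descriptor ring
 * and RDFAR the address of the descriptor the DMAC will fetch next.
 * Each descriptor is 16 bytes, hence the ">> 4":
 *
 *	RDLAR = 0x40000000, RDFAR = 0x400000a0
 *	(0x400000a0 - 0x40000000) >> 4 = 10
 *
 * so cur_rx and dirty_rx are resynchronized to descriptor index 10
 * before reception is restarted by setting EDRRR_R.
 */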

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end; this write-back interrupt is otherwise unused */
		if (intr_status & EESR_TABT) {
			/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
			intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* free dirty tx buffers */
		sh_eth_txfree(ndev);

		/* SH7712 BUG: restart tx dma if EDTRR was cleared */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
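
/*
 * Status-clear idiom used throughout this handler, for illustration
 * (register value hypothetical): ECSR/EESR bits latch until software
 * writes 1 back to them, so
 *
 *	felic_stat = sh_eth_read(ndev, ECSR);	// e.g. 0x00000005
 *	sh_eth_write(ndev, felic_stat, ECSR);	// clears exactly those bits
 *
 * acknowledges only the sources that were actually observed; an event
 * that latches between the read and the write stays pending rather than
 * being silently dropped.
 */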

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC |	/* Frame recv */
			EESR_RMAF |	/* Multicast address recv */
			EESR_RRF |	/* Residual-bit frame recv */
			EESR_RTLF |	/* Long frame recv */
			EESR_RTSF |	/* Short frame recv */
			EESR_PRE |	/* PHY-LSI recv error */
			EESR_CERF)) {	/* Recv frame CRC error */
		sh_eth_rx(ndev, intr_status);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
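
/*
 * Shared-IRQ contract, for reference: the handler returns IRQ_HANDLED
 * only when EESR showed one of the events this device can raise, and
 * IRQ_NONE otherwise, so that other handlers sharing the line (the
 * IRQF_SHARED case in sh_eth_open() on SH7763/SH7764/SH7757) get a
 * chance to claim the interrupt.
 */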

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try to connect to the PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
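
/*
 * For illustration (names hypothetical): PHY_ID_FMT is "%s:%02x", so a
 * bus id of "sh_eth-0" and phy_id 1 produce the string "sh_eth-0:01",
 * which is the device name phy_connect() resolves on the MDIO bus
 * registered in sh_mdio_init() below.
 */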

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}
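
/*
 * msg_enable is the standard NETIF_MSG_* bitmask and is reachable from
 * userspace through the two callbacks above, e.g. (device name
 * hypothetical):
 *
 *	ethtool eth0			# prints "Current message level"
 *	ethtool -s eth0 msglvl 0x84	# NETIF_MSG_LINK | NETIF_MSG_TX_ERR
 */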

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
			sizeof(sh_eth_gstrings_stats));
		break;
	}
}
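
/*
 * The three callbacks above follow the usual ethtool stats contract:
 * get_sset_count() reports the number of u64 slots, get_strings()
 * names them, and get_ethtool_stats() fills them in the same order,
 * so `ethtool -S <dev>` prints rx_current/tx_current/rx_dirty/tx_dirty
 * with the ring positions captured at call time.
 */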

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}
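
/*
 * The resize path above is what `ethtool -G` exercises, e.g. (device
 * name and sizes hypothetical):
 *
 *	ethtool -G eth0 rx 128 tx 128
 *
 * On a running interface the hardware is quiesced first (queue stopped,
 * EESIPR masked, EDTRR/EDRRR cleared), the descriptor rings are rebuilt
 * at the new sizes, and reception is restarted by setting EDRRR_R.
 */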

static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings		= sh_eth_get_settings,
	.set_settings		= sh_eth_set_settings,
	.nway_reset		= sh_eth_nway_reset,
	.get_msglevel		= sh_eth_get_msglevel,
	.set_msglevel		= sh_eth_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sh_eth_get_strings,
	.get_ethtool_stats	= sh_eth_get_ethtool_stats,
	.get_sset_count		= sh_eth_get_sset_count,
	.get_ringparam		= sh_eth_get_ringparam,
	.set_ringparam		= sh_eth_set_ringparam,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Cannot assign IRQ number\n");
		/* drop the runtime PM reference taken above */
		pm_runtime_put_sync(&mdp->pdev->dev);
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
			" resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	/* Free all the skbuffs in the Tx queue. */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
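
/*
 * Ring accounting above, illustrated (numbers hypothetical): cur_tx and
 * dirty_tx are free-running counters, so with num_tx_ring = 64,
 * cur_tx = 1000 and dirty_tx = 940 there are 60 descriptors in flight;
 * 60 >= 64 - 4, so the queue is stopped unless sh_eth_txfree() can
 * retire completed descriptors.  The "- 4" keeps a small headroom, and
 * "% num_tx_ring" maps the counters onto ring slots (1000 % 64 = 40).
 */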

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}
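
/*
 * The MAC counters read above (TROCR, CDCR, LCCR, ...) count events
 * since they were last cleared: each value is folded into
 * net_device_stats and the register is then written to 0, which resets
 * the hardware counter, so repeated calls never double-count.  E.g.
 * (value hypothetical): CDCR reads 7 -> collisions += 7 -> CDCR = 0.
 */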

/* device ioctl function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
			   int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* For TSU_POSTn. Please refer to the manual about these (strange) bit fields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
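
/*
 * Worked example of the TSU_POST packing (entry number hypothetical):
 * each 32-bit TSU_POSTn register holds 8 CAM entries x 4 bits, MSB
 * first.  For entry 13:
 *
 *	register    : TSU_POST1 + (13 / 8) * 4    -> TSU_POST2
 *	nibble mask : 0x0f << (28 - (13 % 8) * 4) -> 0x00000f00
 *	port 0 bit  : 0x08 << 8                   -> 0x00000800
 *	port 1 bit  : 0x02 << 8                   -> 0x00000200
 *
 * so the per-entry nibble records which port(s) should have frames
 * matching that CAM entry delivered.
 */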

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If other port enables, the function returns "true" */
	return tmp & ref_mask;
}

static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
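
/*
 * CAM entry layout handled by the two helpers above, for a hypothetical
 * address 00:11:22:33:44:55 stored at entry i (entries are 8 bytes
 * apart, starting at TSU_ADRH0):
 *
 *	ADRH (reg)     = 0x00112233	addr[0..3]
 *	ADRL (reg + 4) = 0x00004455	addr[4..5] in the low 16 bits
 *
 * sh_eth_tsu_busy() is polled after each half-write because the TSU
 * reports TSU_ADSBSY while it commits the update.
 */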

static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}

static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}
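
/*
 * TSU_TEN holds one enable bit per CAM entry, MSB first (entry number
 * hypothetical): enabling entry 3 sets bit (31 - 3), i.e. 0x10000000,
 * while entry 0 owns the top bit 0x80000000.
 */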

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	/* find_entry() returns -ENOENT when missing, so test for a valid index */
	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
		/* Entry found */
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			goto done;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}
done:
	return 0;
}

static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/*
	 * Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
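
/*
 * Resulting ECMR modes, summarized (MCT gates reception by the CAM
 * list, PRM is promiscuous):
 *
 *	normal      : MCT=1, PRM=0 - unicast/broadcast + listed mcast
 *	allmulti    : MCT=0, PRM=0 - all multicast accepted
 *	promiscuous : MCT=0, PRM=1 - everything accepted, CAM purged
 */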

static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/*
	 * The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and VLAN tags are no
	 * longer filtered in hardware.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
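
/*
 * Consequence of the single-filter design (commands and vids
 * hypothetical):
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100   # filtered
 *	ip link add link eth0 name eth0.200 type vlan id 200   # filter off
 *
 * With two or more vids registered, the TSU_VTAG filter is switched off
 * and every tag is accepted; the VLAN core still demultiplexes frames
 * in software, so behaviour stays correct, only less selective.
 */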

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_bus;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out:
	return ret;
}
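
/*
 * The PIR register exposes the raw MDIO pins (PIR_MDC clock, PIR_MDO
 * output, PIR_MDI input, PIR_MMD direction).  mdio-bitbang drives them
 * through bb_ops to synthesize MDIO frames entirely in software, which
 * is why no dedicated MDIO controller block is configured anywhere in
 * this driver.
 */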

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = pdev->dev.platform_data;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "TSU resource not found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");