/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
	(NETIF_MSG_LINK | \
	NETIF_MSG_TIMER | \
	NETIF_MSG_RX_ERR | \
	NETIF_MSG_TX_ERR)

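/*
 * SH_ETH_DEF_MSG_ENABLE seeds mdp->msg_enable; the netif_msg_*() checks
 * below gate their dev_err()/dev_warn() output on these bits, and user
 * space can tune them at runtime through the get/set_msglevel ethtool ops
 * defined later in this file.
 */
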
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = readl(GIGA_MALR(i));
		mahr[i] = readl(GIGA_MAHR(i));
	}

	/* reset device */
	writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		writel(malr[i], GIGA_MALR(i));
		writel(mahr[i], GIGA_MAHR(i));
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static void sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int cnt = 100;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
				EDMR);
		while (cnt > 0) {
			if (!(sh_eth_read(ndev, EDMR) & 0x3))
				break;
			mdelay(1);
			cnt--;
		}
		/* the loop leaves cnt at 0, never below, on timeout */
		if (cnt == 0)
			printk(KERN_ERR "Device reset fail\n");

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
				EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
				EDMR);
	}
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_reset(struct net_device *ndev)
{
	int cnt = 100;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset fail\n");

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
}
#endif

#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

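/*
 * Illustrative note: the EDMAC reads descriptors in its own configured byte
 * order. On a big-endian CPU paired with a little-endian EDMAC, a status
 * word such as 0x80000000 (TD_TACT) must be stored byte-swapped as
 * 00 00 00 80, which is why every descriptor field below goes through
 * cpu_to_edmac()/edmac_to_cpu() instead of being accessed raw.
 */
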
/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
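
/*
 * Worked example (illustrative): for dev_addr 00:11:22:33:44:55 the two
 * writes above load MAHR = 0x00112233 and MALR = 0x00004455;
 * read_mac_address() below reverses exactly this packing.
 */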

/*
 * Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). When you want to use this device, you must set
 * the MAC address in the bootloader.
 *
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

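/*
 * Note: sh_eth_is_gether() keys off the register offset table selected at
 * probe time; the gigabit (GETHER) blocks use sh_eth_offset_gigabit and
 * also need a different EDTRR transmit-request value, which is why the
 * transmit paths call sh_eth_get_edtrr_trns() instead of hard-coding one.
 */
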
struct bb_info {
	void (*set_gate)(unsigned long addr);
	struct mdiobb_ctrl ctrl;
	u32 addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
	writel(readl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
	writel((readl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
	return (readl(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= sh_mdc_ctrl,
	.set_mdio_dir	= sh_mmd_ctrl,
	.set_mdio_data	= sh_set_mdio,
	.get_mdio_data	= sh_get_mdio,
};

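/*
 * These ops bit-bang the MDIO protocol through a single GPIO-like register.
 * Elsewhere in the driver (outside this excerpt) the bus is registered
 * roughly like the following sketch, assuming the standard mdio-bitbang
 * helper API:
 *
 *	bitbang->ctrl.ops = &bb_ops;
 *	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 */
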
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		skb->dev = ndev; /* Mark as being used by this device. */
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is aligned to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

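	/*
	 * Worked example (illustrative): with an MTU of 1500,
	 * (1500 + 26 + 7) & ~7 = 1528, and 1528 + 2 + 16 = 1546 bytes per
	 * receive buffer, plus NET_IP_ALIGN when rpadir padding is in use.
	 */
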
	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
			GFP_KERNEL);

	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
			GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}

static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, RFLR_VALUE, RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	netif_start_queue(ndev);

	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}

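/*
 * Ring accounting note: cur_tx and dirty_tx are free-running counters, so
 * cur_tx - dirty_tx is the number of descriptors still in flight; each
 * counter is reduced modulo TX_RING_SIZE only when indexing the ring.
 */
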
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is aligned to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end; unused write-back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Abort\n");
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
			sh_eth_write(ndev, EDRRR_R, EDRRR);
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
			intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC |	/* Frame recv */
			EESR_RMAF |	/* Multicast address recv */
			EESR_RRF |	/* Bit frame recv */
			EESR_RTLF |	/* Long frame recv */
			EESR_RTSF |	/* Short frame recv */
			EESR_PRE |	/* PHY-LSI recv error */
			EESR_CERF)) {	/* Recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
				| ECMR_DM, ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				0, mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
			sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings		= sh_eth_get_settings,
	.set_settings		= sh_eth_set_settings,
	.nway_reset		= sh_eth_nway_reset,
	.get_msglevel		= sh_eth_get_msglevel,
	.set_msglevel		= sh_eth_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sh_eth_get_strings,
	.get_ethtool_stats	= sh_eth_get_ethtool_stats,
	.get_sset_count		= sh_eth_get_sset_count,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	init_timer(&mdp->timer);
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

1426/* Timeout function */
1427static void sh_eth_tx_timeout(struct net_device *ndev)
1428{
1429 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001430 struct sh_eth_rxdesc *rxdesc;
1431 int i;
1432
1433 netif_stop_queue(ndev);
1434
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001435 if (netif_msg_timer(mdp))
1436 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001437 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001438
1439 /* tx_errors count up */
1440 mdp->stats.tx_errors++;
1441
1442 /* timer off */
1443 del_timer_sync(&mdp->timer);
1444
1445 /* Free all the skbuffs in the Rx queue. */
1446 for (i = 0; i < RX_RING_SIZE; i++) {
1447 rxdesc = &mdp->rx_ring[i];
1448 rxdesc->status = 0;
1449 rxdesc->addr = 0xBADF00D0;
1450 if (mdp->rx_skbuff[i])
1451 dev_kfree_skb(mdp->rx_skbuff[i]);
1452 mdp->rx_skbuff[i] = NULL;
1453 }
1454 for (i = 0; i < TX_RING_SIZE; i++) {
1455 if (mdp->tx_skbuff[i])
1456 dev_kfree_skb(mdp->tx_skbuff[i]);
1457 mdp->tx_skbuff[i] = NULL;
1458 }
1459
1460 /* device init */
1461 sh_eth_dev_init(ndev);
1462
1463 /* timer on */
1464 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1465 add_timer(&mdp->timer);
1466}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Stop the queue when fewer than four free descriptors remain and
	 * none can be reclaimed. */
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	/* Pad short frames up to the minimum Ethernet payload length. */
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	/* The last descriptor also carries TD_TDLE so the DMAC wraps back
	 * to the head of the ring. */
	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	/* Kick the transmit DMA if it is not already running. */
	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
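
/*
 * A sketch of the ring accounting used above (illustrative helpers,
 * not part of the driver): cur_tx and dirty_tx are free-running
 * counters, so occupancy and ring slot fall out of plain modular
 * arithmetic.
 *
 *	static inline u32 sh_eth_tx_in_flight(struct sh_eth_private *mdp)
 *	{
 *		return mdp->cur_tx - mdp->dirty_tx; // at most TX_RING_SIZE
 *	}
 *
 *	static inline u32 sh_eth_tx_slot(u32 counter)
 *	{
 *		return counter % TX_RING_SIZE;	// ring index of a counter
 *	}
 */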

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free Rx descriptor DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free Tx descriptor DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	mdp->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &mdp->stats;
}
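
/*
 * The MAC error counters read above are "write clear": reading returns
 * the count accumulated since the last clear, and writing 0 resets the
 * register. An illustrative helper (not part of the driver) capturing
 * the idiom:
 *
 *	static u32 sh_eth_read_and_clear(struct net_device *ndev, int reg)
 *	{
 *		u32 count = sh_eth_read(ndev, reg);
 *
 *		sh_eth_write(ndev, 0, reg);
 *		return count;
 *	}
 */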

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
				ECMR_PRM, ECMR);
	} else {
		/* Normal, unicast/broadcast-only mode. */
		sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
				ECMR_MCT, ECMR);
	}
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = 0x08;	/* PIR bit 3: MDIO data in */
	bitbang->mdo_msk = 0x04;	/* PIR bit 2: MDIO data out */
	bitbang->mmd_msk = 0x02;	/* PIR bit 1: MDIO mode (direction) */
	bitbang->mdc_msk = 0x01;	/* PIR bit 0: MDIO clock */
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}
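
/*
 * Sketch of how the offset table is consumed (assumed to mirror the
 * sh_eth_read()/sh_eth_write() helpers in sh_eth.h): every register
 * access indexes the per-variant table selected above, which is what
 * lets one driver body serve the GETHER, fast-SH4 and SH3/SH2
 * register layouts.
 *
 *	static inline void example_eth_write(struct net_device *ndev,
 *					     unsigned long data, int reg)
 *	{
 *		struct sh_eth_private *mdp = netdev_priv(ndev);
 *
 *		iowrite32(data, (void __iomem *)(ndev->base_addr +
 *						 mdp->reg_offset[reg]));
 *	}
 */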

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_multicast_list	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate device.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* First device only init */
	if (!devno) {
		if (mdp->cd->tsu) {
			struct resource *rtsu;
			rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
			if (!rtsu) {
				dev_err(&pdev->dev, "TSU resource not found\n");
				ret = -ENODEV;
				goto out_release;
			}
			mdp->tsu_addr = ioremap(rtsu->start,
						resource_size(rtsu));
		}
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (mdp && mdp->tsu_addr)
		iounmap(mdp->tsu_addr);
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}
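
/*
 * Illustrative board-side registration (a sketch with hypothetical
 * addresses, IRQ and PHY id; a real board file supplies values that
 * match its SoC and wiring):
 *
 *	static struct sh_eth_plat_data example_eth_pd = {
 *		.phy		= 0x1f,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *	};
 *
 *	static struct resource example_eth_resources[] = {
 *		[0] = {
 *			.start	= 0xfe200000,
 *			.end	= 0xfe2001ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			.start	= 91,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device example_eth_device = {
 *		.name		= CARDNAME,	(must match the driver name)
 *		.id		= 0,
 *		.resource	= example_eth_resources,
 *		.num_resources	= ARRAY_SIZE(example_eth_resources),
 *		.dev		= { .platform_data = &example_eth_pd },
 *	};
 */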

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	iounmap(mdp->tsu_addr);
	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
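
/*
 * Equivalent form using the SET_RUNTIME_PM_OPS() helper from
 * <linux/pm.h>, shown for reference (assumes the helper is available
 * in the target kernel):
 *
 *	static const struct dev_pm_ops sh_eth_dev_pm_ops = {
 *		SET_RUNTIME_PM_OPS(sh_eth_runtime_nop,
 *				   sh_eth_runtime_nop, NULL)
 *	};
 */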

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};

static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}

static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}

module_init(sh_eth_init);
module_exit(sh_eth_cleanup);
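
/*
 * Note: on kernels that provide the module_platform_driver() macro
 * (an assumption about the target tree), the init/exit boilerplate
 * above collapses to a single line:
 *
 *	module_platform_driver(sh_eth_driver);
 */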

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");