/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>

#include "cpsw_ale.h"
#include "netcp.h"

#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION		"v1.0"

#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME			"netcp-gbe"
#define GBE_SS_VERSION_14		0x4ed21104

#define GBE_SS_REG_INDEX		0
#define GBE_SGMII34_REG_INDEX		1
#define GBE_SM_REG_INDEX		2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET		0x34
#define GBE13_SLAVE_PORT_OFFSET		0x60
#define GBE13_EMAC_OFFSET		0x100
#define GBE13_SLAVE_PORT2_OFFSET	0x200
#define GBE13_HW_STATS_OFFSET		0x300
#define GBE13_ALE_OFFSET		0x600
#define GBE13_HOST_PORT_NUM		0
#define GBE13_NUM_SLAVES		4
#define GBE13_NUM_ALE_PORTS		(GBE13_NUM_SLAVES + 1)
#define GBE13_NUM_ALE_ENTRIES		1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME		"netcp-xgbe"
#define XGBE_SS_VERSION_10		0x4ee42100

#define XGBE_SS_REG_INDEX		0
#define XGBE_SM_REG_INDEX		1
#define XGBE_SERDES_REG_INDEX		2

/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET		0x34
#define XGBE10_SLAVE_PORT_OFFSET	0x64
#define XGBE10_EMAC_OFFSET		0x400
#define XGBE10_ALE_OFFSET		0x700
#define XGBE10_HW_STATS_OFFSET		0x800
#define XGBE10_HOST_PORT_NUM		0
#define XGBE10_NUM_SLAVES		2
#define XGBE10_NUM_ALE_PORTS		(XGBE10_NUM_SLAVES + 1)
#define XGBE10_NUM_ALE_ENTRIES		1024

#define	GBE_TIMER_INTERVAL		(HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK			BIT(0)
#define SOFT_RESET			BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT	100
#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2

#define MACSL_RX_ENABLE_CSF		BIT(23)
#define MACSL_ENABLE_EXT_CTL		BIT(18)
#define MACSL_XGMII_ENABLE		BIT(13)
#define MACSL_XGIG_MODE			BIT(8)
#define MACSL_GIG_MODE			BIT(7)
#define MACSL_GMII_ENABLE		BIT(5)
#define MACSL_FULLDUPLEX		BIT(0)

#define GBE_CTL_P0_ENABLE		BIT(2)
#define GBE_REG_VAL_STAT_ENABLE_ALL	0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL	0xf
#define GBE_STATS_CD_SEL		BIT(28)

#define GBE_PORT_MASK(x)		(BIT(x) - 1)
#define GBE_MASK_NO_PORTS		0

#define GBE_DEF_1G_MAC_CONTROL					\
		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL				\
		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE			0
#define GBE_STATSB_MODULE			1
#define GBE_STATSC_MODULE			2
#define GBE_STATSD_MODULE			3

#define XGBE_STATS0_MODULE			0
#define XGBE_STATS1_MODULE			1
#define XGBE_STATS2_MODULE			2

#define MAX_SLAVES				GBE13_NUM_SLAVES
/* s: 0-based slave_port */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)

#define GBE_TX_QUEUE				648
#define	GBE_TXHOOK_ORDER			0
#define GBE_DEFAULT_ALE_AGEOUT			30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID		-1

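/*
 * GBE and XGBE expose the same logical registers at different offsets.
 * The *_SET_REG_OFS() helpers record the per-variant offset of each
 * register in an _ofs table at probe time, and GBE_REG_ADDR() resolves
 * the actual MMIO address from that table at run time, so the common
 * code below works for both subsystems.
 */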
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)

struct xgbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
	u32	control;
};

struct xgbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	cppi_thresh;
};

struct xgbe_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
	u32	control;
};

struct xgbe_host_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	src_id;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct xgbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	em_control;
	u32	__reserved_1;
	u32	tx_gap;
	u32	rsvd[4];
};

struct xgbe_host_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	__rsvd_0[3];
	u32	rx_oversized_frames;
	u32	__rsvd_1;
	u32	rx_undersized_frames;
	u32	__rsvd_2;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	__rsvd_3[9];
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

struct xgbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))

struct gbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
};

struct gbe_ss_regs_ofs {
	u16	id_ver;
	u16	control;
};

struct gbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
};

struct gbe_switch_regs_ofs {
	u16	id_ver;
	u16	control;
	u16	soft_reset;
	u16	emcontrol;
	u16	stat_port_en;
	u16	ptype;
	u16	flow_control;
};

struct gbe_port_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	sa_lo;
	u16	sa_hi;
	u16	ts_ctl;
	u16	ts_seq_ltype;
	u16	ts_vlan;
	u16	ts_ctl_ltype2;
	u16	ts_ctl2;
};

struct gbe_host_port_regs {
	u32	src_id;
	u32	port_vlan;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	rx_maxlen;
};

struct gbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
	u32	rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16	mac_control;
	u16	soft_reset;
	u16	rx_maxlen;
};

struct gbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	__pad_0[2];
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE13_NUM_HW_STATS_MOD			2
#define XGBE10_NUM_HW_STATS_MOD			3
#define GBE_MAX_HW_STAT_MODS			3
#define GBE_HW_STATS_REG_MAP_SZ			0x100

struct gbe_slave {
	void __iomem			*port_regs;
	void __iomem			*emac_regs;
	struct gbe_port_regs_ofs	port_regs_ofs;
	struct gbe_emac_regs_ofs	emac_regs_ofs;
	int				slave_num; /* 0 based logical number */
	int				port_num;  /* actual port number */
	atomic_t			link_state;
	bool				open;
	struct phy_device		*phy;
	u32				link_interface;
	u32				mac_control;
	u8				phy_port_t;
	struct device_node		*phy_node;
	struct list_head		slave_list;
};

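/* Per-subsystem (switch) state, shared by all slave ports and interfaces */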
struct gbe_priv {
	struct device			*dev;
	struct netcp_device		*netcp_device;
	struct timer_list		timer;
	u32				num_slaves;
	u32				ale_entries;
	u32				ale_ports;
	bool				enable_ale;
	struct netcp_tx_pipe		tx_pipe;

	int				host_port;
	u32				rx_packet_max;
	u32				ss_version;

	void __iomem			*ss_regs;
	void __iomem			*switch_regs;
	void __iomem			*host_port_regs;
	void __iomem			*ale_reg;
	void __iomem			*sgmii_port_regs;
	void __iomem			*sgmii_port34_regs;
	void __iomem			*xgbe_serdes_regs;
	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	struct gbe_ss_regs_ofs		ss_regs_ofs;
	struct gbe_switch_regs_ofs	switch_regs_ofs;
	struct gbe_host_port_regs_ofs	host_port_regs_ofs;

	struct cpsw_ale			*ale;
	unsigned int			tx_queue_id;
	const char			*dma_chan_name;

	struct list_head		gbe_intf_head;
	struct list_head		secondary_slaves;
	struct net_device		*dummy_ndev;

	u64				*hw_stats;
	const struct netcp_ethtool_stat *et_stats;
	int				num_et_stats;
	/*  Lock for updating the hwstats */
	spinlock_t			hw_stats_lock;
};

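/* Per-interface state: binds one NetCP net_device to one slave port */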
struct gbe_intf {
	struct net_device	*ndev;
	struct device		*dev;
	struct gbe_priv		*gbe_dev;
	struct netcp_tx_pipe	tx_pipe;
	struct gbe_slave	*slave;
	struct list_head	gbe_intf_list;
	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};

#define GBE_STATSA_INFO(field)		\
{					\
	"GBE_A:"#field, GBE_STATSA_MODULE,	\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)	\
}

#define GBE_STATSB_INFO(field)		\
{					\
	"GBE_B:"#field, GBE_STATSB_MODULE,	\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)	\
}

#define GBE_STATSC_INFO(field)		\
{					\
	"GBE_C:"#field, GBE_STATSC_MODULE,	\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)	\
}

#define GBE_STATSD_INFO(field)		\
{					\
	"GBE_D:"#field, GBE_STATSD_MODULE,	\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)	\
}

static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};

#define XGBE_STATS0_INFO(field)		\
{					\
	"GBE_0:"#field, XGBE_STATS0_MODULE,	\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)	\
}

#define XGBE_STATS1_INFO(field)		\
{					\
	"GBE_1:"#field, XGBE_STATS1_MODULE,	\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)	\
}

#define XGBE_STATS2_INFO(field)		\
{					\
	"GBE_2:"#field, XGBE_STATS2_MODULE,	\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)	\
}

static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};

#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

#define first_sec_slave(priv)					\
	list_first_entry(&priv->secondary_slaves, \
			 struct gbe_slave, slave_list)

static void keystone_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}

static u32 keystone_get_msglevel(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	return netcp->msg_enable;
}

static void keystone_set_msglevel(struct net_device *ndev, u32 value)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	netcp->msg_enable = value;
}

static void keystone_get_stat_strings(struct net_device *ndev,
				      uint32_t stringset, uint8_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;
	int i;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < gbe_dev->num_et_stats; i++) {
			memcpy(data, gbe_dev->et_stats[i].desc,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		break;
	}
}

static int keystone_get_sset_count(struct net_device *ndev, int stringset)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return gbe_dev->num_et_stats;
	default:
		return -EINVAL;
	}
}

static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
		p = base + gbe_dev->et_stats[i].offset;
		tmp = readl(p);
		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
		if (data)
			data[i] = gbe_dev->hw_stats[i];
		/* write-to-decrement:
		 * new register value = old register value - write value
		 */
		writel(tmp, p);
	}
}

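/*
 * On version 1.4 hardware only two of the four stats modules are visible
 * at a time; GBE_STATS_CD_SEL in stat_port_en switches between the A/B
 * and C/D pairs, so the counters are read out in two passes.
 */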
static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
	u64 *hw_stats = &gbe_dev->hw_stats[0];
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
	int i, j, pair;

	for (pair = 0; pair < 2; pair++) {
		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		if (pair == 0)
			val &= ~GBE_STATS_CD_SEL;
		else
			val |= GBE_STATS_CD_SEL;

		/* make the stat modules visible */
		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		for (i = 0; i < pair_size; i++) {
			j = pair * pair_size + i;
			switch (gbe_dev->et_stats[j].type) {
			case GBE_STATSA_MODULE:
			case GBE_STATSC_MODULE:
				base = gbe_statsa;
				break;
			case GBE_STATSB_MODULE:
			case GBE_STATSD_MODULE:
				base = gbe_statsb;
				break;
			}

			p = base + gbe_dev->et_stats[j].offset;
			tmp = readl(p);
			hw_stats[j] += tmp;
			if (data)
				data[j] = hw_stats[j];
			/* write-to-decrement:
			 * new register value = old register value - write value
			 */
			writel(tmp, p);
		}
	}
}

static void keystone_get_ethtool_stats(struct net_device *ndev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	gbe_dev = gbe_intf->gbe_dev;
	spin_lock_bh(&gbe_dev->hw_stats_lock);
	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, data);
	else
		gbe_update_stats(gbe_dev, data);
	spin_unlock_bh(&gbe_dev->hw_stats_lock);
}

static int keystone_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	ret = phy_ethtool_gset(phy, cmd);
	if (!ret)
		cmd->port = gbe_intf->slave->phy_port_t;

	return ret;
}

static int keystone_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	u32 features = cmd->advertising & cmd->supported;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	if (cmd->port != gbe_intf->slave->phy_port_t) {
		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
			return -EINVAL;

		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
			return -EINVAL;

		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
			return -EINVAL;

		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
			return -EINVAL;

		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
			return -EINVAL;
	}

	gbe_intf->slave->phy_port_t = cmd->port;
	return phy_ethtool_sset(phy, cmd);
}

static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo		= keystone_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= keystone_get_msglevel,
	.set_msglevel		= keystone_set_msglevel,
	.get_strings		= keystone_get_stat_strings,
	.get_sset_count		= keystone_get_sset_count,
	.get_ethtool_stats	= keystone_get_ethtool_stats,
	.get_settings		= keystone_get_settings,
	.set_settings		= keystone_set_settings,
};

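/* Pack a MAC address into the sa_hi (bytes 0-3) / sa_lo (bytes 4-5) registers */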
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void gbe_set_slave_mac(struct gbe_slave *slave,
			      struct gbe_intf *gbe_intf)
{
	struct net_device *ndev = gbe_intf->ndev;

	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
}

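/* When the host port is 0, slave port numbering starts at 1 */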
static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;

	return slave_num;
}

static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open)
			netif_carrier_on(ndev);
	} else {
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}

static bool gbe_phy_link_status(struct gbe_slave *slave)
{
	 return !slave->phy || slave->phy->link;
}

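/*
 * A slave is considered up only when both the PHY (if present) and, for
 * non-XGMII links, the SGMII port report link; state changes are pushed
 * to the MAC, ALE and carrier via netcp_ethss_link_state_action().
 */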
static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	if (!SLAVE_LINK_IS_XGMII(slave))
		sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
							     sp);
	phy_link_state = gbe_phy_link_status(slave);
	link_state = phy_link_state & sgmii_link_state;

	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}

static void xgbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}

/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}

/* Configure EMAC */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}

static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
	void __iomem *sgmii_port_regs;

	sgmii_port_regs = priv->sgmii_port_regs;
	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
		sgmii_port_regs = priv->sgmii_port34_regs;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
		netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
				   slave->link_interface);
	}
}

static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;

	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);
	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		if (priv->ss_version == XGBE_SS_VERSION_10)
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}
		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			dev_name(&slave->phy->dev));
		phy_start(slave->phy);
		phy_read_status(slave->phy);
	}
	return 0;
}

static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;
	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}

static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}

static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}

static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}

static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}

static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_add_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_add_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_del_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_del_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}

static int gbe_del_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
	clear_bit(vid, gbe_intf->active_vlans);
	return 0;
}

static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct phy_device *phy = gbe_intf->slave->phy;
	int ret = -EOPNOTSUPP;

	if (phy)
		ret = phy_mii_ioctl(phy, req, cmd);

	return ret;
}

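/*
 * Periodic housekeeping (every GBE_TIMER_INTERVAL): poll link state on all
 * open interfaces and secondary slaves, and fold the hardware counters into
 * the 64-bit software statistics.
 */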
static void netcp_ethss_timer(unsigned long arg)
{
	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
	struct gbe_intf *gbe_intf;
	struct gbe_slave *slave;

	/* Check & update SGMII link state of interfaces */
	for_each_intf(gbe_intf, gbe_dev) {
		if (!gbe_intf->slave->open)
			continue;
		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
					      gbe_intf->ndev);
	}

	/* Check & update SGMII link state of secondary ports */
	for_each_sec_slave(slave, gbe_dev) {
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
	}

	spin_lock_bh(&gbe_dev->hw_stats_lock);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, NULL);
	else
		gbe_update_stats(gbe_dev, NULL);

	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
}

static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *gbe_intf = data;

	p_info->tx_pipe = &gbe_intf->tx_pipe;
	return 0;
}

static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg;
	int ret;

	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* For 10G use directed to port */
	if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;

	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.switch_to_port = 0;
	else
		gbe_intf->tx_pipe.switch_to_port = port_num;

	dev_dbg(gbe_dev->dev,
		"opened TX channel %s: %p with to port %d, flags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.switch_to_port,
		gbe_intf->tx_pipe.flags);

	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
							 stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
			      gbe_intf);

	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}

static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);

	gbe_slave_stop(gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
				gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}

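/*
 * Parse one slave DT node: "slave-port" and "link-interface" select the
 * port number and MAC control defaults, and the per-variant register
 * offsets for the port and EMAC blocks are filled in here.
 */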
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->open = false;
	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* Emac regs memmap are contiguous but port regs are not */
	port_reg_num = slave->slave_num;
	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		emac_reg_ofs = GBE13_EMAC_OFFSET;
	else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
		emac_reg_ofs = XGBE10_EMAC_OFFSET;

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
			   (0x30 * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
			   (0x40 * slave->slave_num);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}

static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev,
				"memory alloc failed for secondary port(%s), skipping...\n",
				port->name);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%s), skipping...\n",
				port->name);
			devm_kfree(dev, slave);
			continue;
		}

		gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					   NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
			slave->phy = NULL;
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				dev_name(&slave->phy->dev));
			phy_start(slave->phy);
			phy_read_status(slave->phy);
		}
	}
}

static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	for (;;) {
		slave = first_sec_slave(gbe_dev);
		if (!slave)
			break;
		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}

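/*
 * Map the XGBE register regions described in the DT node (subsystem,
 * switch module and SerDes), allocate the stats area and fill in the
 * 10G-specific register offset tables.
 */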
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) ss address at %d\n",
			node->name, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) sm address at %d\n",
			node->name, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%s) address at %d\n",
			node->name, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 XGBE10_NUM_STAT_ENTRIES *
					 (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

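/* Map the GBE subsystem registers and read the subsystem version (the
 * id_ver register at offset 0) so the right per-version setup can be
 * selected.
 */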
static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of node(%s) of gbe ss address at %d\n",
			node->name, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}

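/* Per-version setup for the 1.4 GBE subsystem: map the SGMII port 3/4 and
 * switch module register ranges, allocate hw_stats and fill in the GBE13
 * register offsets.
 */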
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 GBE13_NUM_HW_STAT_ENTRIES *
					 GBE13_NUM_SLAVES * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * i);
	}

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

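/* Instance probe for a "gbe" or "xgbe" device tree node: map the subsystem
 * registers, set up the TX pipe, count the configured interfaces, bring up
 * any secondary slave ports, create the ALE engine and start the link-state
 * timer.
 *
 * Illustrative sketch of the node this code parses; the property names are
 * the ones read below, while the values and child layout are examples only
 * (see the keystone-netcp binding documentation for the authoritative
 * format):
 *
 *	gbe {
 *		tx-channel = "nettx";
 *		tx-queue = <648>;
 *		enable-ale;
 *		interfaces {
 *			gbe0 { slave-port = <0>; };
 *		};
 *		secondary-slave-ports {
 *			...
 *		};
 *	};
 */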
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	/* NULL so that of_node_put() in the error path is a no-op */
	struct device_node *interfaces = NULL, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-queue\" parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			goto quit;

		ret = set_gbe_ethss14_priv(gbe_dev, node);
		if (ret)
			goto quit;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			goto quit;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
		if (ret)
			goto quit;
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
		goto quit;
	}

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		goto quit;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		goto quit;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
	}

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports)
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev, "No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto quit;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto quit;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	init_timer(&gbe_dev->timer);
	gbe_dev->timer.data = (unsigned long)gbe_dev;
	gbe_dev->timer.function = netcp_ethss_timer;
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

quit:
	if (gbe_dev->hw_stats)
		devm_kfree(dev, gbe_dev->hw_stats);
	cpsw_ale_destroy(gbe_dev->ale);
	if (gbe_dev->ss_regs)
		devm_iounmap(dev, gbe_dev->ss_regs);
	of_node_put(interfaces);
	devm_kfree(dev, gbe_dev);
	return ret;
}

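/* Attach a NETCP interface to this instance: allocate the per-interface
 * gbe_intf and its slave, initialize the slave from the interface node and
 * hook up the keystone ethtool ops.
 */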
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

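/* Undo gbe_attach() for one interface. */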
static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

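/* Instance teardown: stop the timer and ALE, close the TX pipe, free the
 * secondary ports and release the instance memory.
 */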
static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct device *dev = gbe_dev->dev;

	del_timer_sync(&gbe_dev->timer);
	cpsw_ale_stop(gbe_dev->ale);
	cpsw_ale_destroy(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(dev, "unreleased ethss interfaces present\n");

	devm_kfree(dev, gbe_dev->hw_stats);
	devm_iounmap(dev, gbe_dev->ss_regs);
	/* use the saved dev pointer: gbe_dev->dev is gone after the memset */
	memset(gbe_dev, 0x00, sizeof(*gbe_dev));
	devm_kfree(dev, gbe_dev);
	return 0;
}

static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		/* keep module registration balanced on failure */
		netcp_unregister_module(&gbe_module);
		return ret;
	}

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");