/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>

#include "cpsw_ale.h"
#include "netcp.h"

#define NETCP_DRIVER_NAME	"TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION	"v1.0"

#define GBE_IDENT(reg)		((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)	(reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg)	(reg & 0xff)
#define GBE_RTL_VERSION(reg)	((reg >> 11) & 0x1f)

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME		"netcp-gbe"
#define GBE_SS_VERSION_14	0x4ed21104

#define GBE_SS_REG_INDEX	0
#define GBE_SGMII34_REG_INDEX	1
#define GBE_SM_REG_INDEX	2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET		0x34
#define GBE13_SLAVE_PORT_OFFSET		0x60
#define GBE13_EMAC_OFFSET		0x100
#define GBE13_SLAVE_PORT2_OFFSET	0x200
#define GBE13_HW_STATS_OFFSET		0x300
#define GBE13_ALE_OFFSET		0x600
#define GBE13_HOST_PORT_NUM		0
#define GBE13_NUM_ALE_ENTRIES		1024

/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME	"netcp-gbenu"
#define GBE_SS_ID_NU		0x4ee6
#define GBE_SS_ID_2U		0x4ee8

#define IS_SS_ID_MU(d) \
	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))

#define IS_SS_ID_NU(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)

#define GBENU_SS_REG_INDEX		0
#define GBENU_SM_REG_INDEX		1
#define GBENU_SGMII_MODULE_OFFSET	0x100
#define GBENU_HOST_PORT_OFFSET		0x1000
#define GBENU_SLAVE_PORT_OFFSET		0x2000
#define GBENU_EMAC_OFFSET		0x2330
#define GBENU_HW_STATS_OFFSET		0x1a000
#define GBENU_ALE_OFFSET		0x1e000
#define GBENU_HOST_PORT_NUM		0
#define GBENU_NUM_ALE_ENTRIES		1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME	"netcp-xgbe"
#define XGBE_SS_VERSION_10	0x4ee42100

#define XGBE_SS_REG_INDEX	0
#define XGBE_SM_REG_INDEX	1
#define XGBE_SERDES_REG_INDEX	2

/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET		0x34
#define XGBE10_SLAVE_PORT_OFFSET	0x64
#define XGBE10_EMAC_OFFSET		0x400
#define XGBE10_ALE_OFFSET		0x700
#define XGBE10_HW_STATS_OFFSET		0x800
#define XGBE10_HOST_PORT_NUM		0
#define XGBE10_NUM_ALE_ENTRIES		1024

#define GBE_TIMER_INTERVAL	(HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK		BIT(0)
#define SOFT_RESET		BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT	100
#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2

#define MACSL_RX_ENABLE_CSF	BIT(23)
#define MACSL_ENABLE_EXT_CTL	BIT(18)
#define MACSL_XGMII_ENABLE	BIT(13)
#define MACSL_XGIG_MODE		BIT(8)
#define MACSL_GIG_MODE		BIT(7)
#define MACSL_GMII_ENABLE	BIT(5)
#define MACSL_FULLDUPLEX	BIT(0)

#define GBE_CTL_P0_ENABLE	BIT(2)
#define GBE13_REG_VAL_STAT_ENABLE_ALL	0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL	0xf
#define GBE_STATS_CD_SEL	BIT(28)

#define GBE_PORT_MASK(x)	(BIT(x) - 1)
#define GBE_MASK_NO_PORTS	0

#define GBE_DEF_1G_MAC_CONTROL	\
		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |	\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL	\
		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |	\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE	0
#define GBE_STATSB_MODULE	1
#define GBE_STATSC_MODULE	2
#define GBE_STATSD_MODULE	3

#define GBENU_STATS0_MODULE	0
#define GBENU_STATS1_MODULE	1
#define GBENU_STATS2_MODULE	2
#define GBENU_STATS3_MODULE	3
#define GBENU_STATS4_MODULE	4
#define GBENU_STATS5_MODULE	5
#define GBENU_STATS6_MODULE	6
#define GBENU_STATS7_MODULE	7
#define GBENU_STATS8_MODULE	8

#define XGBE_STATS0_MODULE	0
#define XGBE_STATS1_MODULE	1
#define XGBE_STATS2_MODULE	2

/* s: 0-based slave_port */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)

#define GBE_TX_QUEUE		648
#define GBE_TXHOOK_ORDER	0
#define GBE_DEFAULT_ALE_AGEOUT	30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID	-1

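/* The GBE 1.4, GBE NU/2U and XGBE flavours of the subsystem lay out
 * their switch, host port and EMAC registers differently.  The
 * *_SET_REG_OFS() helpers record, per register block, the offset of
 * each register of interest (taken from the flavour-specific register
 * struct), and GBE_REG_ADDR() turns a block base plus a recorded
 * offset back into the address passed to readl()/writel().
 */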
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)

#define HOST_TX_PRI_MAP_DEFAULT	0x00000000

struct xgbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
	u32 control;
};

struct xgbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
	u32 cppi_thresh;
};

struct xgbe_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
	u32 control;
};

struct xgbe_host_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 src_id;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

struct xgbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 em_control;
	u32 __reserved_1;
	u32 tx_gap;
	u32 rsvd[4];
};

struct xgbe_host_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 __rsvd_0[3];
	u32 rx_oversized_frames;
	u32 __rsvd_1;
	u32 rx_undersized_frames;
	u32 __rsvd_2;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 __rsvd_3[9];
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

struct xgbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))

struct gbenu_ss_regs {
	u32 id_ver;
	u32 synce_count;	/* NU */
	u32 synce_mux;		/* NU */
	u32 control;		/* 2U */
	u32 __rsvd_0[2];	/* 2U */
	u32 rgmii_status;	/* 2U */
	u32 ss_status;		/* 2U */
};

struct gbenu_switch_regs {
	u32 id_ver;
	u32 control;
	u32 __rsvd_0[2];
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;		/* NU */
	u32 soft_idle;
	u32 thru_rate;		/* NU */
	u32 gap_thresh;		/* NU */
	u32 tx_start_wds;	/* NU */
	u32 eee_prescale;	/* 2U */
	u32 tx_g_oflow_thresh_set;	/* NU */
	u32 tx_g_oflow_thresh_clr;	/* NU */
	u32 tx_g_buf_thresh_set_l;	/* NU */
	u32 tx_g_buf_thresh_set_h;	/* NU */
	u32 tx_g_buf_thresh_clr_l;	/* NU */
	u32 tx_g_buf_thresh_clr_h;	/* NU */
};

struct gbenu_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 max_blks;		/* 2U */
	u32 mem_align1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;		/* NU */
	u32 pri_ctl;		/* 2U */
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri;	/* NU */
	u32 __rsvd_1;
	u32 idle2lpi;		/* 2U */
	u32 lpi2idle;		/* 2U */
	u32 eee_status;		/* 2U */
	u32 __rsvd_2;
	u32 __rsvd_3[176];	/* NU: more to add */
	u32 __rsvd_4[2];
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

struct gbenu_host_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 flow_id_offset;	/* 2U */
	u32 __rsvd_1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;		/* NU */
	u32 pri_ctl;
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri;	/* NU */
	u32 __rsvd_2;
	u32 idle2lpi;		/* 2U */
	u32 lpi2wake;		/* 2U */
	u32 eee_status;		/* 2U */
	u32 __rsvd_3;
	u32 __rsvd_4[184];	/* NU */
	u32 host_blks_pri;	/* NU */
};

struct gbenu_emac_regs {
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 boff_test;
	u32 rx_pause;
	u32 __rsvd_0[11];	/* NU */
	u32 tx_pause;
	u32 __rsvd_1[11];	/* NU */
	u32 em_control;
	u32 tx_gap;
};

/* Some hw stat regs are applicable to slave port only.
 * This is handled by gbenu_et_stats struct.  Also some
 * are for SS version NU and some are for 2U.
 */
struct gbenu_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;		/* slave */
	u32 rx_crc_errors;
	u32 rx_align_code_errors;	/* slave */
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;		/* slave */
	u32 rx_undersized_frames;
	u32 rx_fragments;		/* slave */
	u32 ale_drop;
	u32 ale_overrun_drop;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;		/* slave */
	u32 tx_deferred_frames;		/* slave */
	u32 tx_collision_frames;	/* slave */
	u32 tx_single_coll_frames;	/* slave */
	u32 tx_mult_coll_frames;	/* slave */
	u32 tx_excessive_collisions;	/* slave */
	u32 tx_late_collisions;		/* slave */
	u32 rx_ipg_error;		/* slave 10G only */
	u32 tx_carrier_sense_errors;	/* slave */
	u32 tx_bytes;
	u32 tx_64B_frames;
	u32 tx_65_to_127B_frames;
	u32 tx_128_to_255B_frames;
	u32 tx_256_to_511B_frames;
	u32 tx_512_to_1023B_frames;
	u32 tx_1024B_frames;
	u32 net_bytes;
	u32 rx_bottom_fifo_drop;
	u32 rx_port_mask_drop;
	u32 rx_top_fifo_drop;
	u32 ale_rate_limit_drop;
	u32 ale_vid_ingress_drop;
	u32 ale_da_eq_sa_drop;
	u32 __rsvd_0[3];
	u32 ale_unknown_ucast;
	u32 ale_unknown_ucast_bytes;
	u32 ale_unknown_mcast;
	u32 ale_unknown_mcast_bytes;
	u32 ale_unknown_bcast;
	u32 ale_unknown_bcast_bytes;
	u32 ale_pol_match;
	u32 ale_pol_match_red;		/* NU */
	u32 ale_pol_match_yellow;	/* NU */
	u32 __rsvd_1[44];
	u32 tx_mem_protect_err;
	/* following NU only */
	u32 tx_pri0;
	u32 tx_pri1;
	u32 tx_pri2;
	u32 tx_pri3;
	u32 tx_pri4;
	u32 tx_pri5;
	u32 tx_pri6;
	u32 tx_pri7;
	u32 tx_pri0_bcnt;
	u32 tx_pri1_bcnt;
	u32 tx_pri2_bcnt;
	u32 tx_pri3_bcnt;
	u32 tx_pri4_bcnt;
	u32 tx_pri5_bcnt;
	u32 tx_pri6_bcnt;
	u32 tx_pri7_bcnt;
	u32 tx_pri0_drop;
	u32 tx_pri1_drop;
	u32 tx_pri2_drop;
	u32 tx_pri3_drop;
	u32 tx_pri4_drop;
	u32 tx_pri5_drop;
	u32 tx_pri6_drop;
	u32 tx_pri7_drop;
	u32 tx_pri0_drop_bcnt;
	u32 tx_pri1_drop_bcnt;
	u32 tx_pri2_drop_bcnt;
	u32 tx_pri3_drop_bcnt;
	u32 tx_pri4_drop_bcnt;
	u32 tx_pri5_drop_bcnt;
	u32 tx_pri6_drop_bcnt;
	u32 tx_pri7_drop_bcnt;
};

#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
#define GBENU_HW_STATS_REG_MAP_SZ	0x200

struct gbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
};

struct gbe_ss_regs_ofs {
	u16 id_ver;
	u16 control;
};

struct gbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
};

struct gbe_switch_regs_ofs {
	u16 id_ver;
	u16 control;
	u16 soft_reset;
	u16 emcontrol;
	u16 stat_port_en;
	u16 ptype;
	u16 flow_control;
};

struct gbe_port_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 sa_lo;
	u16 sa_hi;
	u16 ts_ctl;
	u16 ts_seq_ltype;
	u16 ts_vlan;
	u16 ts_ctl_ltype2;
	u16 ts_ctl2;
	u16 rx_maxlen;	/* 2U, NU */
};

struct gbe_host_port_regs {
	u32 src_id;
	u32 port_vlan;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 rx_maxlen;
};

struct gbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
	u32 rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16 mac_control;
	u16 soft_reset;
	u16 rx_maxlen;
};

struct gbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 __pad_0[2];
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE_MAX_HW_STAT_MODS	9
#define GBE_HW_STATS_REG_MAP_SZ	0x100

struct gbe_slave {
	void __iomem			*port_regs;
	void __iomem			*emac_regs;
	struct gbe_port_regs_ofs	port_regs_ofs;
	struct gbe_emac_regs_ofs	emac_regs_ofs;
	int				slave_num; /* 0 based logical number */
	int				port_num;  /* actual port number */
	atomic_t			link_state;
	bool				open;
	struct phy_device		*phy;
	u32				link_interface;
	u32				mac_control;
	u8				phy_port_t;
	struct device_node		*phy_node;
	struct list_head		slave_list;
};

struct gbe_priv {
	struct device			*dev;
	struct netcp_device		*netcp_device;
	struct timer_list		timer;
	u32				num_slaves;
	u32				ale_entries;
	u32				ale_ports;
	bool				enable_ale;
	u8				max_num_slaves;
	u8				max_num_ports; /* max_num_slaves + 1 */
	struct netcp_tx_pipe		tx_pipe;

	int				host_port;
	u32				rx_packet_max;
	u32				ss_version;
	u32				stats_en_mask;

	void __iomem			*ss_regs;
	void __iomem			*switch_regs;
	void __iomem			*host_port_regs;
	void __iomem			*ale_reg;
	void __iomem			*sgmii_port_regs;
	void __iomem			*sgmii_port34_regs;
	void __iomem			*xgbe_serdes_regs;
	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	struct gbe_ss_regs_ofs		ss_regs_ofs;
	struct gbe_switch_regs_ofs	switch_regs_ofs;
	struct gbe_host_port_regs_ofs	host_port_regs_ofs;

	struct cpsw_ale			*ale;
	unsigned int			tx_queue_id;
	const char			*dma_chan_name;

	struct list_head		gbe_intf_head;
	struct list_head		secondary_slaves;
	struct net_device		*dummy_ndev;

	u64				*hw_stats;
	const struct netcp_ethtool_stat *et_stats;
	int				num_et_stats;
	/*  Lock for updating the hwstats */
	spinlock_t			hw_stats_lock;
};

struct gbe_intf {
	struct net_device	*ndev;
	struct device		*dev;
	struct gbe_priv		*gbe_dev;
	struct netcp_tx_pipe	tx_pipe;
	struct gbe_slave	*slave;
	struct list_head	gbe_intf_list;
	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};

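/* Each entry of the per-SoC ethtool stat tables below carries the
 * string reported by "ethtool -S", the hardware stat module that owns
 * the counter (the 'type' used to index hw_stats_regs[]), and the
 * size and offset of the counter within that module's register block.
 */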
#define GBE_STATSA_INFO(field)				\
{							\
	"GBE_A:"#field, GBE_STATSA_MODULE,		\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)		\
}

#define GBE_STATSB_INFO(field)				\
{							\
	"GBE_B:"#field, GBE_STATSB_MODULE,		\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)		\
}

#define GBE_STATSC_INFO(field)				\
{							\
	"GBE_C:"#field, GBE_STATSC_MODULE,		\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)		\
}

#define GBE_STATSD_INFO(field)				\
{							\
	"GBE_D:"#field, GBE_STATSD_MODULE,		\
	FIELD_SIZEOF(struct gbe_hw_stats, field),	\
	offsetof(struct gbe_hw_stats, field)		\
}

static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};

/* This is the size of entries in GBENU_STATS_HOST */
#define GBENU_ET_STATS_HOST_SIZE	33

#define GBENU_STATS_HOST(field)				\
{							\
	"GBE_HOST:"#field, GBENU_STATS0_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

/* This is the size of entries in GBENU_STATS_P(1-8) */
#define GBENU_ET_STATS_PORT_SIZE	46

#define GBENU_STATS_P1(field)				\
{							\
	"GBE_P1:"#field, GBENU_STATS1_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P2(field)				\
{							\
	"GBE_P2:"#field, GBENU_STATS2_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P3(field)				\
{							\
	"GBE_P3:"#field, GBENU_STATS3_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P4(field)				\
{							\
	"GBE_P4:"#field, GBENU_STATS4_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P5(field)				\
{							\
	"GBE_P5:"#field, GBENU_STATS5_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P6(field)				\
{							\
	"GBE_P6:"#field, GBENU_STATS6_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P7(field)				\
{							\
	"GBE_P7:"#field, GBENU_STATS7_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

#define GBENU_STATS_P8(field)				\
{							\
	"GBE_P8:"#field, GBENU_STATS8_MODULE,		\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),	\
	offsetof(struct gbenu_hw_stats, field)		\
}

static const struct netcp_ethtool_stat gbenu_et_stats[] = {
	/* GBENU Host Module */
	GBENU_STATS_HOST(rx_good_frames),
	GBENU_STATS_HOST(rx_broadcast_frames),
	GBENU_STATS_HOST(rx_multicast_frames),
	GBENU_STATS_HOST(rx_crc_errors),
	GBENU_STATS_HOST(rx_oversized_frames),
	GBENU_STATS_HOST(rx_undersized_frames),
	GBENU_STATS_HOST(ale_drop),
	GBENU_STATS_HOST(ale_overrun_drop),
	GBENU_STATS_HOST(rx_bytes),
	GBENU_STATS_HOST(tx_good_frames),
	GBENU_STATS_HOST(tx_broadcast_frames),
	GBENU_STATS_HOST(tx_multicast_frames),
	GBENU_STATS_HOST(tx_bytes),
	GBENU_STATS_HOST(tx_64B_frames),
	GBENU_STATS_HOST(tx_65_to_127B_frames),
	GBENU_STATS_HOST(tx_128_to_255B_frames),
	GBENU_STATS_HOST(tx_256_to_511B_frames),
	GBENU_STATS_HOST(tx_512_to_1023B_frames),
	GBENU_STATS_HOST(tx_1024B_frames),
	GBENU_STATS_HOST(net_bytes),
	GBENU_STATS_HOST(rx_bottom_fifo_drop),
	GBENU_STATS_HOST(rx_port_mask_drop),
	GBENU_STATS_HOST(rx_top_fifo_drop),
	GBENU_STATS_HOST(ale_rate_limit_drop),
	GBENU_STATS_HOST(ale_vid_ingress_drop),
	GBENU_STATS_HOST(ale_da_eq_sa_drop),
	GBENU_STATS_HOST(ale_unknown_ucast),
	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
	GBENU_STATS_HOST(ale_unknown_mcast),
	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
	GBENU_STATS_HOST(ale_unknown_bcast),
	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
	GBENU_STATS_HOST(tx_mem_protect_err),
	/* GBENU Module 1 */
	GBENU_STATS_P1(rx_good_frames),
	GBENU_STATS_P1(rx_broadcast_frames),
	GBENU_STATS_P1(rx_multicast_frames),
	GBENU_STATS_P1(rx_pause_frames),
	GBENU_STATS_P1(rx_crc_errors),
	GBENU_STATS_P1(rx_align_code_errors),
	GBENU_STATS_P1(rx_oversized_frames),
	GBENU_STATS_P1(rx_jabber_frames),
	GBENU_STATS_P1(rx_undersized_frames),
	GBENU_STATS_P1(rx_fragments),
	GBENU_STATS_P1(ale_drop),
	GBENU_STATS_P1(ale_overrun_drop),
	GBENU_STATS_P1(rx_bytes),
	GBENU_STATS_P1(tx_good_frames),
	GBENU_STATS_P1(tx_broadcast_frames),
	GBENU_STATS_P1(tx_multicast_frames),
	GBENU_STATS_P1(tx_pause_frames),
	GBENU_STATS_P1(tx_deferred_frames),
	GBENU_STATS_P1(tx_collision_frames),
	GBENU_STATS_P1(tx_single_coll_frames),
	GBENU_STATS_P1(tx_mult_coll_frames),
	GBENU_STATS_P1(tx_excessive_collisions),
	GBENU_STATS_P1(tx_late_collisions),
	GBENU_STATS_P1(rx_ipg_error),
	GBENU_STATS_P1(tx_carrier_sense_errors),
	GBENU_STATS_P1(tx_bytes),
	GBENU_STATS_P1(tx_64B_frames),
	GBENU_STATS_P1(tx_65_to_127B_frames),
	GBENU_STATS_P1(tx_128_to_255B_frames),
	GBENU_STATS_P1(tx_256_to_511B_frames),
	GBENU_STATS_P1(tx_512_to_1023B_frames),
	GBENU_STATS_P1(tx_1024B_frames),
	GBENU_STATS_P1(net_bytes),
	GBENU_STATS_P1(rx_bottom_fifo_drop),
	GBENU_STATS_P1(rx_port_mask_drop),
	GBENU_STATS_P1(rx_top_fifo_drop),
	GBENU_STATS_P1(ale_rate_limit_drop),
	GBENU_STATS_P1(ale_vid_ingress_drop),
	GBENU_STATS_P1(ale_da_eq_sa_drop),
	GBENU_STATS_P1(ale_unknown_ucast),
	GBENU_STATS_P1(ale_unknown_ucast_bytes),
	GBENU_STATS_P1(ale_unknown_mcast),
	GBENU_STATS_P1(ale_unknown_mcast_bytes),
	GBENU_STATS_P1(ale_unknown_bcast),
	GBENU_STATS_P1(ale_unknown_bcast_bytes),
	GBENU_STATS_P1(tx_mem_protect_err),
	/* GBENU Module 2 */
	GBENU_STATS_P2(rx_good_frames),
	GBENU_STATS_P2(rx_broadcast_frames),
	GBENU_STATS_P2(rx_multicast_frames),
	GBENU_STATS_P2(rx_pause_frames),
	GBENU_STATS_P2(rx_crc_errors),
	GBENU_STATS_P2(rx_align_code_errors),
	GBENU_STATS_P2(rx_oversized_frames),
	GBENU_STATS_P2(rx_jabber_frames),
	GBENU_STATS_P2(rx_undersized_frames),
	GBENU_STATS_P2(rx_fragments),
	GBENU_STATS_P2(ale_drop),
	GBENU_STATS_P2(ale_overrun_drop),
	GBENU_STATS_P2(rx_bytes),
	GBENU_STATS_P2(tx_good_frames),
	GBENU_STATS_P2(tx_broadcast_frames),
	GBENU_STATS_P2(tx_multicast_frames),
	GBENU_STATS_P2(tx_pause_frames),
	GBENU_STATS_P2(tx_deferred_frames),
	GBENU_STATS_P2(tx_collision_frames),
	GBENU_STATS_P2(tx_single_coll_frames),
	GBENU_STATS_P2(tx_mult_coll_frames),
	GBENU_STATS_P2(tx_excessive_collisions),
	GBENU_STATS_P2(tx_late_collisions),
	GBENU_STATS_P2(rx_ipg_error),
	GBENU_STATS_P2(tx_carrier_sense_errors),
	GBENU_STATS_P2(tx_bytes),
	GBENU_STATS_P2(tx_64B_frames),
	GBENU_STATS_P2(tx_65_to_127B_frames),
	GBENU_STATS_P2(tx_128_to_255B_frames),
	GBENU_STATS_P2(tx_256_to_511B_frames),
	GBENU_STATS_P2(tx_512_to_1023B_frames),
	GBENU_STATS_P2(tx_1024B_frames),
	GBENU_STATS_P2(net_bytes),
	GBENU_STATS_P2(rx_bottom_fifo_drop),
	GBENU_STATS_P2(rx_port_mask_drop),
	GBENU_STATS_P2(rx_top_fifo_drop),
	GBENU_STATS_P2(ale_rate_limit_drop),
	GBENU_STATS_P2(ale_vid_ingress_drop),
	GBENU_STATS_P2(ale_da_eq_sa_drop),
	GBENU_STATS_P2(ale_unknown_ucast),
	GBENU_STATS_P2(ale_unknown_ucast_bytes),
	GBENU_STATS_P2(ale_unknown_mcast),
	GBENU_STATS_P2(ale_unknown_mcast_bytes),
	GBENU_STATS_P2(ale_unknown_bcast),
	GBENU_STATS_P2(ale_unknown_bcast_bytes),
	GBENU_STATS_P2(tx_mem_protect_err),
	/* GBENU Module 3 */
	GBENU_STATS_P3(rx_good_frames),
	GBENU_STATS_P3(rx_broadcast_frames),
	GBENU_STATS_P3(rx_multicast_frames),
	GBENU_STATS_P3(rx_pause_frames),
	GBENU_STATS_P3(rx_crc_errors),
	GBENU_STATS_P3(rx_align_code_errors),
	GBENU_STATS_P3(rx_oversized_frames),
	GBENU_STATS_P3(rx_jabber_frames),
	GBENU_STATS_P3(rx_undersized_frames),
	GBENU_STATS_P3(rx_fragments),
	GBENU_STATS_P3(ale_drop),
	GBENU_STATS_P3(ale_overrun_drop),
	GBENU_STATS_P3(rx_bytes),
	GBENU_STATS_P3(tx_good_frames),
	GBENU_STATS_P3(tx_broadcast_frames),
	GBENU_STATS_P3(tx_multicast_frames),
	GBENU_STATS_P3(tx_pause_frames),
	GBENU_STATS_P3(tx_deferred_frames),
	GBENU_STATS_P3(tx_collision_frames),
	GBENU_STATS_P3(tx_single_coll_frames),
	GBENU_STATS_P3(tx_mult_coll_frames),
	GBENU_STATS_P3(tx_excessive_collisions),
	GBENU_STATS_P3(tx_late_collisions),
	GBENU_STATS_P3(rx_ipg_error),
	GBENU_STATS_P3(tx_carrier_sense_errors),
	GBENU_STATS_P3(tx_bytes),
	GBENU_STATS_P3(tx_64B_frames),
	GBENU_STATS_P3(tx_65_to_127B_frames),
	GBENU_STATS_P3(tx_128_to_255B_frames),
	GBENU_STATS_P3(tx_256_to_511B_frames),
	GBENU_STATS_P3(tx_512_to_1023B_frames),
	GBENU_STATS_P3(tx_1024B_frames),
	GBENU_STATS_P3(net_bytes),
	GBENU_STATS_P3(rx_bottom_fifo_drop),
	GBENU_STATS_P3(rx_port_mask_drop),
	GBENU_STATS_P3(rx_top_fifo_drop),
	GBENU_STATS_P3(ale_rate_limit_drop),
	GBENU_STATS_P3(ale_vid_ingress_drop),
	GBENU_STATS_P3(ale_da_eq_sa_drop),
	GBENU_STATS_P3(ale_unknown_ucast),
	GBENU_STATS_P3(ale_unknown_ucast_bytes),
	GBENU_STATS_P3(ale_unknown_mcast),
	GBENU_STATS_P3(ale_unknown_mcast_bytes),
	GBENU_STATS_P3(ale_unknown_bcast),
	GBENU_STATS_P3(ale_unknown_bcast_bytes),
	GBENU_STATS_P3(tx_mem_protect_err),
	/* GBENU Module 4 */
	GBENU_STATS_P4(rx_good_frames),
	GBENU_STATS_P4(rx_broadcast_frames),
	GBENU_STATS_P4(rx_multicast_frames),
	GBENU_STATS_P4(rx_pause_frames),
	GBENU_STATS_P4(rx_crc_errors),
	GBENU_STATS_P4(rx_align_code_errors),
	GBENU_STATS_P4(rx_oversized_frames),
	GBENU_STATS_P4(rx_jabber_frames),
	GBENU_STATS_P4(rx_undersized_frames),
	GBENU_STATS_P4(rx_fragments),
	GBENU_STATS_P4(ale_drop),
	GBENU_STATS_P4(ale_overrun_drop),
	GBENU_STATS_P4(rx_bytes),
	GBENU_STATS_P4(tx_good_frames),
	GBENU_STATS_P4(tx_broadcast_frames),
	GBENU_STATS_P4(tx_multicast_frames),
	GBENU_STATS_P4(tx_pause_frames),
	GBENU_STATS_P4(tx_deferred_frames),
	GBENU_STATS_P4(tx_collision_frames),
	GBENU_STATS_P4(tx_single_coll_frames),
	GBENU_STATS_P4(tx_mult_coll_frames),
	GBENU_STATS_P4(tx_excessive_collisions),
	GBENU_STATS_P4(tx_late_collisions),
	GBENU_STATS_P4(rx_ipg_error),
	GBENU_STATS_P4(tx_carrier_sense_errors),
	GBENU_STATS_P4(tx_bytes),
	GBENU_STATS_P4(tx_64B_frames),
	GBENU_STATS_P4(tx_65_to_127B_frames),
	GBENU_STATS_P4(tx_128_to_255B_frames),
	GBENU_STATS_P4(tx_256_to_511B_frames),
	GBENU_STATS_P4(tx_512_to_1023B_frames),
	GBENU_STATS_P4(tx_1024B_frames),
	GBENU_STATS_P4(net_bytes),
	GBENU_STATS_P4(rx_bottom_fifo_drop),
	GBENU_STATS_P4(rx_port_mask_drop),
	GBENU_STATS_P4(rx_top_fifo_drop),
	GBENU_STATS_P4(ale_rate_limit_drop),
	GBENU_STATS_P4(ale_vid_ingress_drop),
	GBENU_STATS_P4(ale_da_eq_sa_drop),
	GBENU_STATS_P4(ale_unknown_ucast),
	GBENU_STATS_P4(ale_unknown_ucast_bytes),
	GBENU_STATS_P4(ale_unknown_mcast),
	GBENU_STATS_P4(ale_unknown_mcast_bytes),
	GBENU_STATS_P4(ale_unknown_bcast),
	GBENU_STATS_P4(ale_unknown_bcast_bytes),
	GBENU_STATS_P4(tx_mem_protect_err),
	/* GBENU Module 5 */
	GBENU_STATS_P5(rx_good_frames),
	GBENU_STATS_P5(rx_broadcast_frames),
	GBENU_STATS_P5(rx_multicast_frames),
	GBENU_STATS_P5(rx_pause_frames),
	GBENU_STATS_P5(rx_crc_errors),
	GBENU_STATS_P5(rx_align_code_errors),
	GBENU_STATS_P5(rx_oversized_frames),
	GBENU_STATS_P5(rx_jabber_frames),
	GBENU_STATS_P5(rx_undersized_frames),
	GBENU_STATS_P5(rx_fragments),
	GBENU_STATS_P5(ale_drop),
	GBENU_STATS_P5(ale_overrun_drop),
	GBENU_STATS_P5(rx_bytes),
	GBENU_STATS_P5(tx_good_frames),
	GBENU_STATS_P5(tx_broadcast_frames),
	GBENU_STATS_P5(tx_multicast_frames),
	GBENU_STATS_P5(tx_pause_frames),
	GBENU_STATS_P5(tx_deferred_frames),
	GBENU_STATS_P5(tx_collision_frames),
	GBENU_STATS_P5(tx_single_coll_frames),
	GBENU_STATS_P5(tx_mult_coll_frames),
	GBENU_STATS_P5(tx_excessive_collisions),
	GBENU_STATS_P5(tx_late_collisions),
	GBENU_STATS_P5(rx_ipg_error),
	GBENU_STATS_P5(tx_carrier_sense_errors),
	GBENU_STATS_P5(tx_bytes),
	GBENU_STATS_P5(tx_64B_frames),
	GBENU_STATS_P5(tx_65_to_127B_frames),
	GBENU_STATS_P5(tx_128_to_255B_frames),
	GBENU_STATS_P5(tx_256_to_511B_frames),
	GBENU_STATS_P5(tx_512_to_1023B_frames),
	GBENU_STATS_P5(tx_1024B_frames),
	GBENU_STATS_P5(net_bytes),
	GBENU_STATS_P5(rx_bottom_fifo_drop),
	GBENU_STATS_P5(rx_port_mask_drop),
	GBENU_STATS_P5(rx_top_fifo_drop),
	GBENU_STATS_P5(ale_rate_limit_drop),
	GBENU_STATS_P5(ale_vid_ingress_drop),
	GBENU_STATS_P5(ale_da_eq_sa_drop),
	GBENU_STATS_P5(ale_unknown_ucast),
	GBENU_STATS_P5(ale_unknown_ucast_bytes),
	GBENU_STATS_P5(ale_unknown_mcast),
	GBENU_STATS_P5(ale_unknown_mcast_bytes),
	GBENU_STATS_P5(ale_unknown_bcast),
	GBENU_STATS_P5(ale_unknown_bcast_bytes),
	GBENU_STATS_P5(tx_mem_protect_err),
	/* GBENU Module 6 */
	GBENU_STATS_P6(rx_good_frames),
	GBENU_STATS_P6(rx_broadcast_frames),
	GBENU_STATS_P6(rx_multicast_frames),
	GBENU_STATS_P6(rx_pause_frames),
	GBENU_STATS_P6(rx_crc_errors),
	GBENU_STATS_P6(rx_align_code_errors),
	GBENU_STATS_P6(rx_oversized_frames),
	GBENU_STATS_P6(rx_jabber_frames),
	GBENU_STATS_P6(rx_undersized_frames),
	GBENU_STATS_P6(rx_fragments),
	GBENU_STATS_P6(ale_drop),
	GBENU_STATS_P6(ale_overrun_drop),
	GBENU_STATS_P6(rx_bytes),
	GBENU_STATS_P6(tx_good_frames),
	GBENU_STATS_P6(tx_broadcast_frames),
	GBENU_STATS_P6(tx_multicast_frames),
	GBENU_STATS_P6(tx_pause_frames),
	GBENU_STATS_P6(tx_deferred_frames),
	GBENU_STATS_P6(tx_collision_frames),
	GBENU_STATS_P6(tx_single_coll_frames),
	GBENU_STATS_P6(tx_mult_coll_frames),
	GBENU_STATS_P6(tx_excessive_collisions),
	GBENU_STATS_P6(tx_late_collisions),
	GBENU_STATS_P6(rx_ipg_error),
	GBENU_STATS_P6(tx_carrier_sense_errors),
	GBENU_STATS_P6(tx_bytes),
	GBENU_STATS_P6(tx_64B_frames),
	GBENU_STATS_P6(tx_65_to_127B_frames),
	GBENU_STATS_P6(tx_128_to_255B_frames),
	GBENU_STATS_P6(tx_256_to_511B_frames),
	GBENU_STATS_P6(tx_512_to_1023B_frames),
	GBENU_STATS_P6(tx_1024B_frames),
	GBENU_STATS_P6(net_bytes),
	GBENU_STATS_P6(rx_bottom_fifo_drop),
	GBENU_STATS_P6(rx_port_mask_drop),
	GBENU_STATS_P6(rx_top_fifo_drop),
	GBENU_STATS_P6(ale_rate_limit_drop),
	GBENU_STATS_P6(ale_vid_ingress_drop),
	GBENU_STATS_P6(ale_da_eq_sa_drop),
	GBENU_STATS_P6(ale_unknown_ucast),
	GBENU_STATS_P6(ale_unknown_ucast_bytes),
	GBENU_STATS_P6(ale_unknown_mcast),
	GBENU_STATS_P6(ale_unknown_mcast_bytes),
	GBENU_STATS_P6(ale_unknown_bcast),
	GBENU_STATS_P6(ale_unknown_bcast_bytes),
	GBENU_STATS_P6(tx_mem_protect_err),
	/* GBENU Module 7 */
	GBENU_STATS_P7(rx_good_frames),
	GBENU_STATS_P7(rx_broadcast_frames),
	GBENU_STATS_P7(rx_multicast_frames),
	GBENU_STATS_P7(rx_pause_frames),
	GBENU_STATS_P7(rx_crc_errors),
	GBENU_STATS_P7(rx_align_code_errors),
	GBENU_STATS_P7(rx_oversized_frames),
	GBENU_STATS_P7(rx_jabber_frames),
	GBENU_STATS_P7(rx_undersized_frames),
	GBENU_STATS_P7(rx_fragments),
	GBENU_STATS_P7(ale_drop),
	GBENU_STATS_P7(ale_overrun_drop),
	GBENU_STATS_P7(rx_bytes),
	GBENU_STATS_P7(tx_good_frames),
	GBENU_STATS_P7(tx_broadcast_frames),
	GBENU_STATS_P7(tx_multicast_frames),
	GBENU_STATS_P7(tx_pause_frames),
	GBENU_STATS_P7(tx_deferred_frames),
	GBENU_STATS_P7(tx_collision_frames),
	GBENU_STATS_P7(tx_single_coll_frames),
	GBENU_STATS_P7(tx_mult_coll_frames),
	GBENU_STATS_P7(tx_excessive_collisions),
	GBENU_STATS_P7(tx_late_collisions),
	GBENU_STATS_P7(rx_ipg_error),
	GBENU_STATS_P7(tx_carrier_sense_errors),
	GBENU_STATS_P7(tx_bytes),
	GBENU_STATS_P7(tx_64B_frames),
	GBENU_STATS_P7(tx_65_to_127B_frames),
	GBENU_STATS_P7(tx_128_to_255B_frames),
	GBENU_STATS_P7(tx_256_to_511B_frames),
	GBENU_STATS_P7(tx_512_to_1023B_frames),
	GBENU_STATS_P7(tx_1024B_frames),
	GBENU_STATS_P7(net_bytes),
	GBENU_STATS_P7(rx_bottom_fifo_drop),
	GBENU_STATS_P7(rx_port_mask_drop),
	GBENU_STATS_P7(rx_top_fifo_drop),
	GBENU_STATS_P7(ale_rate_limit_drop),
	GBENU_STATS_P7(ale_vid_ingress_drop),
	GBENU_STATS_P7(ale_da_eq_sa_drop),
	GBENU_STATS_P7(ale_unknown_ucast),
	GBENU_STATS_P7(ale_unknown_ucast_bytes),
	GBENU_STATS_P7(ale_unknown_mcast),
	GBENU_STATS_P7(ale_unknown_mcast_bytes),
	GBENU_STATS_P7(ale_unknown_bcast),
	GBENU_STATS_P7(ale_unknown_bcast_bytes),
	GBENU_STATS_P7(tx_mem_protect_err),
	/* GBENU Module 8 */
	GBENU_STATS_P8(rx_good_frames),
	GBENU_STATS_P8(rx_broadcast_frames),
	GBENU_STATS_P8(rx_multicast_frames),
	GBENU_STATS_P8(rx_pause_frames),
	GBENU_STATS_P8(rx_crc_errors),
	GBENU_STATS_P8(rx_align_code_errors),
	GBENU_STATS_P8(rx_oversized_frames),
	GBENU_STATS_P8(rx_jabber_frames),
	GBENU_STATS_P8(rx_undersized_frames),
	GBENU_STATS_P8(rx_fragments),
	GBENU_STATS_P8(ale_drop),
	GBENU_STATS_P8(ale_overrun_drop),
	GBENU_STATS_P8(rx_bytes),
	GBENU_STATS_P8(tx_good_frames),
	GBENU_STATS_P8(tx_broadcast_frames),
	GBENU_STATS_P8(tx_multicast_frames),
	GBENU_STATS_P8(tx_pause_frames),
	GBENU_STATS_P8(tx_deferred_frames),
	GBENU_STATS_P8(tx_collision_frames),
	GBENU_STATS_P8(tx_single_coll_frames),
	GBENU_STATS_P8(tx_mult_coll_frames),
	GBENU_STATS_P8(tx_excessive_collisions),
	GBENU_STATS_P8(tx_late_collisions),
	GBENU_STATS_P8(rx_ipg_error),
	GBENU_STATS_P8(tx_carrier_sense_errors),
	GBENU_STATS_P8(tx_bytes),
	GBENU_STATS_P8(tx_64B_frames),
	GBENU_STATS_P8(tx_65_to_127B_frames),
	GBENU_STATS_P8(tx_128_to_255B_frames),
	GBENU_STATS_P8(tx_256_to_511B_frames),
	GBENU_STATS_P8(tx_512_to_1023B_frames),
	GBENU_STATS_P8(tx_1024B_frames),
	GBENU_STATS_P8(net_bytes),
	GBENU_STATS_P8(rx_bottom_fifo_drop),
	GBENU_STATS_P8(rx_port_mask_drop),
	GBENU_STATS_P8(rx_top_fifo_drop),
	GBENU_STATS_P8(ale_rate_limit_drop),
	GBENU_STATS_P8(ale_vid_ingress_drop),
	GBENU_STATS_P8(ale_da_eq_sa_drop),
	GBENU_STATS_P8(ale_unknown_ucast),
	GBENU_STATS_P8(ale_unknown_ucast_bytes),
	GBENU_STATS_P8(ale_unknown_mcast),
	GBENU_STATS_P8(ale_unknown_mcast_bytes),
	GBENU_STATS_P8(ale_unknown_bcast),
	GBENU_STATS_P8(ale_unknown_bcast_bytes),
	GBENU_STATS_P8(tx_mem_protect_err),
};

#define XGBE_STATS0_INFO(field)				\
{							\
	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS1_INFO(field)				\
{							\
	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS2_INFO(field)				\
{							\
	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};

#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

#define first_sec_slave(priv)					\
	list_first_entry(&priv->secondary_slaves, \
			 struct gbe_slave, slave_list)

static void keystone_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}

static u32 keystone_get_msglevel(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	return netcp->msg_enable;
}

static void keystone_set_msglevel(struct net_device *ndev, u32 value)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	netcp->msg_enable = value;
}

static void keystone_get_stat_strings(struct net_device *ndev,
				      uint32_t stringset, uint8_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;
	int i;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < gbe_dev->num_et_stats; i++) {
			memcpy(data, gbe_dev->et_stats[i].desc,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		break;
	}
}

static int keystone_get_sset_count(struct net_device *ndev, int stringset)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return gbe_dev->num_et_stats;
	default:
		return -EINVAL;
	}
}

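/* Read every enabled hardware counter, accumulate it into the 64-bit
 * shadow copy in gbe_dev->hw_stats, optionally report it to ethtool via
 * 'data', and write the value just read back to the register so the
 * (write-to-decrement) hardware counter is cleared.
 */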
static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *base = NULL;
	u32  __iomem *p;
	u32 tmp = 0;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
		p = base + gbe_dev->et_stats[i].offset;
		tmp = readl(p);
		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
		if (data)
			data[i] = gbe_dev->hw_stats[i];
		/* write-to-decrement:
		 * new register value = old register value - write value
		 */
		writel(tmp, p);
	}
}

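/* On version 1.4 of the subsystem the four stat modules are visible
 * through only two register areas (hw_stats_regs[0] and [1]).  The
 * GBE_STATS_CD_SEL bit of stat_port_en selects whether modules A/B or
 * C/D are currently mapped, so the counters are collected in two passes.
 */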
1578static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1579{
1580 void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
1581 void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
1582 u64 *hw_stats = &gbe_dev->hw_stats[0];
1583 void __iomem *base = NULL;
1584 u32 __iomem *p;
1585 u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
1586 int i, j, pair;
1587
1588 for (pair = 0; pair < 2; pair++) {
1589 val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1590
1591 if (pair == 0)
1592 val &= ~GBE_STATS_CD_SEL;
1593 else
1594 val |= GBE_STATS_CD_SEL;
1595
1596 /* make the stat modules visible */
1597 writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1598
1599 for (i = 0; i < pair_size; i++) {
1600 j = pair * pair_size + i;
1601 switch (gbe_dev->et_stats[j].type) {
1602 case GBE_STATSA_MODULE:
1603 case GBE_STATSC_MODULE:
1604 base = gbe_statsa;
1605 break;
1606 case GBE_STATSB_MODULE:
1607 case GBE_STATSD_MODULE:
1608 base = gbe_statsb;
1609 break;
1610 }
1611
1612 p = base + gbe_dev->et_stats[j].offset;
1613 tmp = readl(p);
1614 hw_stats[j] += tmp;
1615 if (data)
1616 data[j] = hw_stats[j];
1617 /* write-to-decrement:
1618 * new register value = old register value - write value
1619 */
1620 writel(tmp, p);
1621 }
1622 }
1623}
1624
1625static void keystone_get_ethtool_stats(struct net_device *ndev,
1626 struct ethtool_stats *stats,
1627 uint64_t *data)
1628{
1629 struct netcp_intf *netcp = netdev_priv(ndev);
1630 struct gbe_intf *gbe_intf;
1631 struct gbe_priv *gbe_dev;
1632
1633 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1634 if (!gbe_intf)
1635 return;
1636
1637 gbe_dev = gbe_intf->gbe_dev;
1638 spin_lock_bh(&gbe_dev->hw_stats_lock);
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001639 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1640 gbe_update_stats_ver14(gbe_dev, data);
1641 else
1642 gbe_update_stats(gbe_dev, data);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001643 spin_unlock_bh(&gbe_dev->hw_stats_lock);
1644}
1645
1646static int keystone_get_settings(struct net_device *ndev,
1647 struct ethtool_cmd *cmd)
1648{
1649 struct netcp_intf *netcp = netdev_priv(ndev);
1650 struct phy_device *phy = ndev->phydev;
1651 struct gbe_intf *gbe_intf;
1652 int ret;
1653
1654 if (!phy)
1655 return -EINVAL;
1656
1657 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1658 if (!gbe_intf)
1659 return -EINVAL;
1660
1661 if (!gbe_intf->slave)
1662 return -EINVAL;
1663
1664 ret = phy_ethtool_gset(phy, cmd);
1665 if (!ret)
1666 cmd->port = gbe_intf->slave->phy_port_t;
1667
1668 return ret;
1669}
1670
1671static int keystone_set_settings(struct net_device *ndev,
1672 struct ethtool_cmd *cmd)
1673{
1674 struct netcp_intf *netcp = netdev_priv(ndev);
1675 struct phy_device *phy = ndev->phydev;
1676 struct gbe_intf *gbe_intf;
1677 u32 features = cmd->advertising & cmd->supported;
1678
1679 if (!phy)
1680 return -EINVAL;
1681
1682 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1683 if (!gbe_intf)
1684 return -EINVAL;
1685
1686 if (!gbe_intf->slave)
1687 return -EINVAL;
1688
1689 if (cmd->port != gbe_intf->slave->phy_port_t) {
1690 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
1691 return -EINVAL;
1692
1693 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
1694 return -EINVAL;
1695
1696 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
1697 return -EINVAL;
1698
1699 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
1700 return -EINVAL;
1701
1702 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1703 return -EINVAL;
1704 }
1705
1706 gbe_intf->slave->phy_port_t = cmd->port;
1707 return phy_ethtool_sset(phy, cmd);
1708}
1709
1710static const struct ethtool_ops keystone_ethtool_ops = {
1711 .get_drvinfo = keystone_get_drvinfo,
1712 .get_link = ethtool_op_get_link,
1713 .get_msglevel = keystone_get_msglevel,
1714 .set_msglevel = keystone_set_msglevel,
1715 .get_strings = keystone_get_stat_strings,
1716 .get_sset_count = keystone_get_sset_count,
1717 .get_ethtool_stats = keystone_get_ethtool_stats,
1718 .get_settings = keystone_get_settings,
1719 .set_settings = keystone_set_settings,
1720};
1721
1722#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
1723 ((mac)[2] << 16) | ((mac)[3] << 24))
1724#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
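/* Example: for dev_addr 00:01:02:03:04:05, mac_hi() yields 0x03020100 and
 * mac_lo() yields 0x0504; these are the values written to the slave port's
 * sa_hi/sa_lo registers below.
 */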
1725
1726static void gbe_set_slave_mac(struct gbe_slave *slave,
1727 struct gbe_intf *gbe_intf)
1728{
1729 struct net_device *ndev = gbe_intf->ndev;
1730
1731 writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1732 writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1733}
1734
1735static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1736{
1737 if (priv->host_port == 0)
1738 return slave_num + 1;
1739
1740 return slave_num;
1741}
1742
1743static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1744 struct net_device *ndev,
1745 struct gbe_slave *slave,
1746 int up)
1747{
1748 struct phy_device *phy = slave->phy;
1749 u32 mac_control = 0;
1750
1751 if (up) {
1752 mac_control = slave->mac_control;
1753		if (phy && (phy->speed == SPEED_1000)) {
1754			mac_control |= MACSL_GIG_MODE;
1755			mac_control &= ~MACSL_XGIG_MODE;
1756 } else if (phy && (phy->speed == SPEED_10000)) {
1757 mac_control |= MACSL_XGIG_MODE;
1758 mac_control &= ~MACSL_GIG_MODE;
1759 }
1760
1761 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1762 mac_control));
1763
1764 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1765 ALE_PORT_STATE,
1766 ALE_PORT_STATE_FORWARD);
1767
1768 if (ndev && slave->open)
1769 netif_carrier_on(ndev);
1770 } else {
1771 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1772 mac_control));
1773 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1774 ALE_PORT_STATE,
1775 ALE_PORT_STATE_DISABLE);
1776 if (ndev)
1777 netif_carrier_off(ndev);
1778 }
1779
1780 if (phy)
1781 phy_print_status(phy);
1782}
1783
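/* A slave with no attached PHY (e.g. a fixed link) is treated as having its
 * PHY link up; the overall link state is then determined by the SGMII/serdes
 * status checked in netcp_ethss_update_link_state().
 */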
1784static bool gbe_phy_link_status(struct gbe_slave *slave)
1785{
1786 return !slave->phy || slave->phy->link;
1787}
1788
1789static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1790 struct gbe_slave *slave,
1791 struct net_device *ndev)
1792{
1793 int sp = slave->slave_num;
1794 int phy_link_state, sgmii_link_state = 1, link_state;
1795
1796 if (!slave->open)
1797 return;
1798
1799	if (!SLAVE_LINK_IS_XGMII(slave)) {
1800 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1801 sgmii_link_state =
1802 netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
1803 else
1804 sgmii_link_state =
1805 netcp_sgmii_get_port_link(
1806 gbe_dev->sgmii_port_regs, sp);
1807 }
1808
1809	phy_link_state = gbe_phy_link_status(slave);
1810 link_state = phy_link_state & sgmii_link_state;
1811
1812 if (atomic_xchg(&slave->link_state, link_state) != link_state)
1813 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
1814 link_state);
1815}
1816
1817static void xgbe_adjust_link(struct net_device *ndev)
1818{
1819 struct netcp_intf *netcp = netdev_priv(ndev);
1820 struct gbe_intf *gbe_intf;
1821
1822 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1823 if (!gbe_intf)
1824 return;
1825
1826 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1827 ndev);
1828}
1829
1830static void gbe_adjust_link(struct net_device *ndev)
1831{
1832 struct netcp_intf *netcp = netdev_priv(ndev);
1833 struct gbe_intf *gbe_intf;
1834
1835 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1836 if (!gbe_intf)
1837 return;
1838
1839 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1840 ndev);
1841}
1842
1843static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
1844{
1845 struct gbe_priv *gbe_dev = netdev_priv(ndev);
1846 struct gbe_slave *slave;
1847
1848 for_each_sec_slave(slave, gbe_dev)
1849 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1850}
1851
1852/* Reset EMAC
1853 * Soft reset is set and polled until clear, or until a timeout occurs
1854 */
1855static int gbe_port_reset(struct gbe_slave *slave)
1856{
1857 u32 i, v;
1858
1859 /* Set the soft reset bit */
1860 writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
1861
1862 /* Wait for the bit to clear */
1863 for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
1864 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
1865 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
1866 return 0;
1867 }
1868
1869 /* Timeout on the reset */
1870 return GMACSL_RET_WARN_RESET_INCOMPLETE;
1871}
1872
1873/* Configure EMAC */
1874static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1875 int max_rx_len)
1876{
1877	void __iomem *rx_maxlen_reg;
1878	u32 xgmii_mode;
1879
1880	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
1881 max_rx_len = NETCP_MAX_FRAME_SIZE;
1882
1883	/* Enable correct MII mode at SS level */
1884 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
1885 (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
1886 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
1887 xgmii_mode |= (1 << slave->slave_num);
1888 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
1889 }
1890
1891	if (IS_SS_ID_MU(gbe_dev))
1892 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
1893 else
1894 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
1895
1896 writel(max_rx_len, rx_maxlen_reg);
1897	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1898}
1899
1900static void gbe_slave_stop(struct gbe_intf *intf)
1901{
1902 struct gbe_priv *gbe_dev = intf->gbe_dev;
1903 struct gbe_slave *slave = intf->slave;
1904
1905 gbe_port_reset(slave);
1906 /* Disable forwarding */
1907 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1908 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1909 cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
1910 1 << slave->port_num, 0, 0);
1911
1912 if (!slave->phy)
1913 return;
1914
1915 phy_stop(slave->phy);
1916 phy_disconnect(slave->phy);
1917 slave->phy = NULL;
1918}
1919
1920static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
1921{
1922 void __iomem *sgmii_port_regs;
1923
1924 sgmii_port_regs = priv->sgmii_port_regs;
1925 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1926 sgmii_port_regs = priv->sgmii_port34_regs;
1927
1928	if (!SLAVE_LINK_IS_XGMII(slave)) {
1929 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
1930 netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
1931 slave->link_interface);
1932 }
1933}
1934
1935static int gbe_slave_open(struct gbe_intf *gbe_intf)
1936{
1937 struct gbe_priv *priv = gbe_intf->gbe_dev;
1938 struct gbe_slave *slave = gbe_intf->slave;
1939 phy_interface_t phy_mode;
1940 bool has_phy = false;
1941
1942 void (*hndlr)(struct net_device *) = gbe_adjust_link;
1943
1944 gbe_sgmii_config(priv, slave);
1945 gbe_port_reset(slave);
1946 gbe_port_config(priv, slave, priv->rx_packet_max);
1947 gbe_set_slave_mac(slave, gbe_intf);
1948 /* enable forwarding */
1949 cpsw_ale_control_set(priv->ale, slave->port_num,
1950 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1951 cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
1952 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
1953
1954 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1955 has_phy = true;
1956 phy_mode = PHY_INTERFACE_MODE_SGMII;
1957 slave->phy_port_t = PORT_MII;
1958 } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
1959 has_phy = true;
1960 phy_mode = PHY_INTERFACE_MODE_NA;
1961 slave->phy_port_t = PORT_FIBRE;
1962 }
1963
1964 if (has_phy) {
1965		if (priv->ss_version == XGBE_SS_VERSION_10)
1966 hndlr = xgbe_adjust_link;
1967
1968		slave->phy = of_phy_connect(gbe_intf->ndev,
1969 slave->phy_node,
1970 hndlr, 0,
1971 phy_mode);
1972 if (!slave->phy) {
1973 dev_err(priv->dev, "phy not found on slave %d\n",
1974 slave->slave_num);
1975 return -ENODEV;
1976 }
1977 dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
1978 dev_name(&slave->phy->dev));
1979 phy_start(slave->phy);
1980 phy_read_status(slave->phy);
1981 }
1982 return 0;
1983}
1984
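/* Host (CPPI) port initialization. The ALE runs in bypass mode unless the
 * DT "enable-ale" property is present (bypass_en defaults to 1 below); with
 * the ALE bypassed no address learning is done and gbe_open() instead
 * directs each packet to an explicit slave port via switch_to_port.
 */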
1985static void gbe_init_host_port(struct gbe_priv *priv)
1986{
1987 int bypass_en = 1;
1988
1989 /* Host Tx Pri */
1990 if (IS_SS_ID_NU(priv))
1991 writel(HOST_TX_PRI_MAP_DEFAULT,
1992 GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
1993
1994	/* Max length register */
1995 writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
1996 rx_maxlen));
1997
1998 cpsw_ale_start(priv->ale);
1999
2000 if (priv->enable_ale)
2001 bypass_en = 0;
2002
2003 cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2004
2005 cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2006
2007 cpsw_ale_control_set(priv->ale, priv->host_port,
2008 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2009
2010 cpsw_ale_control_set(priv->ale, 0,
2011 ALE_PORT_UNKNOWN_VLAN_MEMBER,
2012 GBE_PORT_MASK(priv->ale_ports));
2013
2014 cpsw_ale_control_set(priv->ale, 0,
2015 ALE_PORT_UNKNOWN_MCAST_FLOOD,
2016 GBE_PORT_MASK(priv->ale_ports - 1));
2017
2018 cpsw_ale_control_set(priv->ale, 0,
2019 ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2020 GBE_PORT_MASK(priv->ale_ports));
2021
2022 cpsw_ale_control_set(priv->ale, 0,
2023 ALE_PORT_UNTAGGED_EGRESS,
2024 GBE_PORT_MASK(priv->ale_ports));
2025}
2026
2027static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2028{
2029 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2030 u16 vlan_id;
2031
2032 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2033 GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2034 ALE_MCAST_FWD_2);
2035 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2036 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2037 GBE_PORT_MASK(gbe_dev->ale_ports),
2038 ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2039 }
2040}
2041
2042static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2043{
2044 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2045 u16 vlan_id;
2046
2047 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2048
2049 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2050 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2051 ALE_VLAN, vlan_id);
2052}
2053
2054static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2055{
2056 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2057 u16 vlan_id;
2058
2059 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2060
2061 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2062 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2063 }
2064}
2065
2066static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2067{
2068 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2069 u16 vlan_id;
2070
2071 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2072
2073 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2074 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2075 ALE_VLAN, vlan_id);
2076 }
2077}
2078
2079static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2080{
2081 struct gbe_intf *gbe_intf = intf_priv;
2082 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2083
2084 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2085 naddr->addr, naddr->type);
2086
2087 switch (naddr->type) {
2088 case ADDR_MCAST:
2089 case ADDR_BCAST:
2090 gbe_add_mcast_addr(gbe_intf, naddr->addr);
2091 break;
2092 case ADDR_UCAST:
2093 case ADDR_DEV:
2094 gbe_add_ucast_addr(gbe_intf, naddr->addr);
2095 break;
2096 case ADDR_ANY:
2097 /* nothing to do for promiscuous */
2098 default:
2099 break;
2100 }
2101
2102 return 0;
2103}
2104
2105static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2106{
2107 struct gbe_intf *gbe_intf = intf_priv;
2108 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2109
2110 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2111 naddr->addr, naddr->type);
2112
2113 switch (naddr->type) {
2114 case ADDR_MCAST:
2115 case ADDR_BCAST:
2116 gbe_del_mcast_addr(gbe_intf, naddr->addr);
2117 break;
2118 case ADDR_UCAST:
2119 case ADDR_DEV:
2120 gbe_del_ucast_addr(gbe_intf, naddr->addr);
2121 break;
2122 case ADDR_ANY:
2123 /* nothing to do for promiscuous */
2124 default:
2125 break;
2126 }
2127
2128 return 0;
2129}
2130
2131static int gbe_add_vid(void *intf_priv, int vid)
2132{
2133 struct gbe_intf *gbe_intf = intf_priv;
2134 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2135
2136 set_bit(vid, gbe_intf->active_vlans);
2137
2138 cpsw_ale_add_vlan(gbe_dev->ale, vid,
2139 GBE_PORT_MASK(gbe_dev->ale_ports),
2140 GBE_MASK_NO_PORTS,
2141 GBE_PORT_MASK(gbe_dev->ale_ports),
2142 GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2143
2144 return 0;
2145}
2146
2147static int gbe_del_vid(void *intf_priv, int vid)
2148{
2149 struct gbe_intf *gbe_intf = intf_priv;
2150 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2151
2152 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2153 clear_bit(vid, gbe_intf->active_vlans);
2154 return 0;
2155}
2156
2157static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2158{
2159 struct gbe_intf *gbe_intf = intf_priv;
2160 struct phy_device *phy = gbe_intf->slave->phy;
2161 int ret = -EOPNOTSUPP;
2162
2163 if (phy)
2164 ret = phy_mii_ioctl(phy, req, cmd);
2165
2166 return ret;
2167}
2168
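/* Periodic housekeeping: poll link state for all open interfaces and
 * secondary slaves, and fold the 32-bit hardware counters into the 64-bit
 * software accumulators so they do not wrap unnoticed between ethtool reads.
 */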
2169static void netcp_ethss_timer(unsigned long arg)
2170{
2171 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2172 struct gbe_intf *gbe_intf;
2173 struct gbe_slave *slave;
2174
2175 /* Check & update SGMII link state of interfaces */
2176 for_each_intf(gbe_intf, gbe_dev) {
2177 if (!gbe_intf->slave->open)
2178 continue;
2179 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2180 gbe_intf->ndev);
2181 }
2182
2183 /* Check & update SGMII link state of secondary ports */
2184 for_each_sec_slave(slave, gbe_dev) {
2185 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2186 }
2187
2188 spin_lock_bh(&gbe_dev->hw_stats_lock);
2189
2190 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2191 gbe_update_stats_ver14(gbe_dev, NULL);
2192 else
2193 gbe_update_stats(gbe_dev, NULL);
2194
2195 spin_unlock_bh(&gbe_dev->hw_stats_lock);
2196
2197 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
2198 add_timer(&gbe_dev->timer);
2199}
2200
2201static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
2202{
2203 struct gbe_intf *gbe_intf = data;
2204
2205 p_info->tx_pipe = &gbe_intf->tx_pipe;
2206 return 0;
2207}
2208
2209static int gbe_open(void *intf_priv, struct net_device *ndev)
2210{
2211 struct gbe_intf *gbe_intf = intf_priv;
2212 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2213 struct netcp_intf *netcp = netdev_priv(ndev);
2214 struct gbe_slave *slave = gbe_intf->slave;
2215 int port_num = slave->port_num;
2216 u32 reg;
2217 int ret;
2218
2219 reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2220 dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2221 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2222 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2223
2224	/* For 10G and on NetCP 1.5, use directed to port */
2225 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2226		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2227
2228	if (gbe_dev->enable_ale)
2229 gbe_intf->tx_pipe.switch_to_port = 0;
2230 else
2231 gbe_intf->tx_pipe.switch_to_port = port_num;
2232
2233 dev_dbg(gbe_dev->dev,
2234 "opened TX channel %s: %p with to port %d, flags %d\n",
2235		gbe_intf->tx_pipe.dma_chan_name,
2236 gbe_intf->tx_pipe.dma_channel,
2237		gbe_intf->tx_pipe.switch_to_port,
2238 gbe_intf->tx_pipe.flags);
2239
2240 gbe_slave_stop(gbe_intf);
2241
2242 /* disable priority elevation and enable statistics on all ports */
2243 writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2244
2245 /* Control register */
2246 writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2247
2248 /* All statistics enabled and STAT AB visible by default */
2249	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2250 stat_port_en));
2251
2252 ret = gbe_slave_open(gbe_intf);
2253 if (ret)
2254 goto fail;
2255
2256 netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2257 gbe_intf);
2258
2259 slave->open = true;
2260 netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2261 return 0;
2262
2263fail:
2264 gbe_slave_stop(gbe_intf);
2265 return ret;
2266}
2267
2268static int gbe_close(void *intf_priv, struct net_device *ndev)
2269{
2270 struct gbe_intf *gbe_intf = intf_priv;
2271 struct netcp_intf *netcp = netdev_priv(ndev);
2272
2273 gbe_slave_stop(gbe_intf);
2274 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2275 gbe_intf);
2276
2277 gbe_intf->slave->open = false;
2278 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2279 return 0;
2280}
2281
2282static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2283 struct device_node *node)
2284{
2285 int port_reg_num;
2286 u32 port_reg_ofs, emac_reg_ofs;
2287	u32 port_reg_blk_sz, emac_reg_blk_sz;
2288
2289 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2290 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2291 return -EINVAL;
2292 }
2293
2294 if (of_property_read_u32(node, "link-interface",
2295 &slave->link_interface)) {
2296 dev_warn(gbe_dev->dev,
2297 "missing link-interface value defaulting to 1G mac-phy link\n");
2298 slave->link_interface = SGMII_LINK_MAC_PHY;
2299 }
2300
2301 slave->open = false;
2302 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2303 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2304
2305	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2306 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2307 else
2308 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2309
2310 /* Emac regs memmap are contiguous but port regs are not */
2311 port_reg_num = slave->slave_num;
2312 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2313 if (slave->slave_num > 1) {
2314 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2315 port_reg_num -= 2;
2316 } else {
2317 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2318 }
2319		emac_reg_ofs = GBE13_EMAC_OFFSET;
2320 port_reg_blk_sz = 0x30;
2321 emac_reg_blk_sz = 0x40;
2322 } else if (IS_SS_ID_MU(gbe_dev)) {
2323 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2324 emac_reg_ofs = GBENU_EMAC_OFFSET;
2325 port_reg_blk_sz = 0x1000;
2326 emac_reg_blk_sz = 0x1000;
2327	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2328 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2329		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2330 port_reg_blk_sz = 0x30;
2331 emac_reg_blk_sz = 0x40;
2332	} else {
2333 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2334 gbe_dev->ss_version);
2335 return -EINVAL;
2336 }
2337
2338	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2339			   (port_reg_blk_sz * port_reg_num);
2340	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2341			   (emac_reg_blk_sz * slave->slave_num);
2342
2343 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2344 /* Initialize slave port register offsets */
2345 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2346 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2347 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2348 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2349 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2350 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2351 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2352 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2353 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2354
2355 /* Initialize EMAC register offsets */
2356 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2357 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2358 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2359
2360	} else if (IS_SS_ID_MU(gbe_dev)) {
2361 /* Initialize slave port register offsets */
2362 GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2363 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2364 GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2365 GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2366 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2367 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2368 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
2369 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2370 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
2371 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
2372
2373 /* Initialize EMAC register offsets */
2374 GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
2375 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
2376
2377	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2378 /* Initialize slave port register offsets */
2379 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
2380 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2381 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
2382 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
2383 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2384 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2385 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2386 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2387 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2388
2389 /* Initialize EMAC register offsets */
2390 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
2391 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2392 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2393	}
2394
2395 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
2396 return 0;
2397}
2398
2399static void init_secondary_ports(struct gbe_priv *gbe_dev,
2400 struct device_node *node)
2401{
2402 struct device *dev = gbe_dev->dev;
2403 phy_interface_t phy_mode;
2404 struct gbe_priv **priv;
2405 struct device_node *port;
2406 struct gbe_slave *slave;
2407 bool mac_phy_link = false;
2408
2409 for_each_child_of_node(node, port) {
2410 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
2411 if (!slave) {
2412 dev_err(dev,
2413				"memory alloc failed for secondary port(%s), skipping...\n",
2414 port->name);
2415 continue;
2416 }
2417
2418 if (init_slave(gbe_dev, slave, port)) {
2419 dev_err(dev,
2420 "Failed to initialize secondary port(%s), skipping...\n",
2421 port->name);
2422 devm_kfree(dev, slave);
2423 continue;
2424 }
2425
2426 gbe_sgmii_config(gbe_dev, slave);
2427 gbe_port_reset(slave);
2428 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
2429 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
2430 gbe_dev->num_slaves++;
2431		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2432 (slave->link_interface == XGMII_LINK_MAC_PHY))
2433			mac_phy_link = true;
2434
2435 slave->open = true;
2436		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2437 break;
2438	}
2439
2440 /* of_phy_connect() is needed only for MAC-PHY interface */
2441 if (!mac_phy_link)
2442 return;
2443
2444 /* Allocate dummy netdev device for attaching to phy device */
2445 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
2446 NET_NAME_UNKNOWN, ether_setup);
2447 if (!gbe_dev->dummy_ndev) {
2448 dev_err(dev,
2449 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
2450 return;
2451 }
2452 priv = netdev_priv(gbe_dev->dummy_ndev);
2453 *priv = gbe_dev;
2454
2455 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2456 phy_mode = PHY_INTERFACE_MODE_SGMII;
2457 slave->phy_port_t = PORT_MII;
2458 } else {
2459 phy_mode = PHY_INTERFACE_MODE_NA;
2460 slave->phy_port_t = PORT_FIBRE;
2461 }
2462
2463 for_each_sec_slave(slave, gbe_dev) {
2464		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2465 (slave->link_interface != XGMII_LINK_MAC_PHY))
2466			continue;
2467 slave->phy =
2468 of_phy_connect(gbe_dev->dummy_ndev,
2469 slave->phy_node,
2470 gbe_adjust_link_sec_slaves,
2471 0, phy_mode);
2472 if (!slave->phy) {
2473 dev_err(dev, "phy not found for slave %d\n",
2474 slave->slave_num);
2475 slave->phy = NULL;
2476 } else {
2477 dev_dbg(dev, "phy found: id is: 0x%s\n",
2478 dev_name(&slave->phy->dev));
2479 phy_start(slave->phy);
2480 phy_read_status(slave->phy);
2481 }
2482 }
2483}
2484
2485static void free_secondary_ports(struct gbe_priv *gbe_dev)
2486{
2487 struct gbe_slave *slave;
2488
2489 for (;;) {
2490 slave = first_sec_slave(gbe_dev);
2491 if (!slave)
2492 break;
2493 if (slave->phy)
2494 phy_disconnect(slave->phy);
2495 list_del(&slave->slave_list);
2496 }
2497 if (gbe_dev->dummy_ndev)
2498 free_netdev(gbe_dev->dummy_ndev);
2499}
2500
2501static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
2502 struct device_node *node)
2503{
2504 struct resource res;
2505 void __iomem *regs;
2506 int ret, i;
2507
2508	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
2509	if (ret) {
2510		dev_err(gbe_dev->dev,
2511 "Can't xlate xgbe of node(%s) ss address at %d\n",
2512 node->name, XGBE_SS_REG_INDEX);
2513		return ret;
2514 }
2515
2516 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2517 if (IS_ERR(regs)) {
2518		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
2519		return PTR_ERR(regs);
2520 }
2521 gbe_dev->ss_regs = regs;
2522
2523	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
2524 if (ret) {
2525 dev_err(gbe_dev->dev,
2526 "Can't xlate xgbe of node(%s) sm address at %d\n",
2527 node->name, XGBE_SM_REG_INDEX);
2528 return ret;
2529 }
2530
2531 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2532 if (IS_ERR(regs)) {
2533 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
2534 return PTR_ERR(regs);
2535 }
2536 gbe_dev->switch_regs = regs;
2537
2538	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
2539 if (ret) {
2540		dev_err(gbe_dev->dev,
2541 "Can't xlate xgbe serdes of node(%s) address at %d\n",
2542 node->name, XGBE_SERDES_REG_INDEX);
2543		return ret;
2544 }
2545
2546 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2547 if (IS_ERR(regs)) {
2548 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
2549 return PTR_ERR(regs);
2550 }
2551 gbe_dev->xgbe_serdes_regs = regs;
2552
2553 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2554					 XGBE10_NUM_STAT_ENTRIES *
2555 (gbe_dev->max_num_ports) * sizeof(u64),
2556 GFP_KERNEL);
2557	if (!gbe_dev->hw_stats) {
2558 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2559 return -ENOMEM;
2560 }
2561
2562 gbe_dev->ss_version = XGBE_SS_VERSION_10;
2563 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
2564 XGBE10_SGMII_MODULE_OFFSET;
2565	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
2566
2567	for (i = 0; i < gbe_dev->max_num_ports; i++)
2568		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2569			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
2570
2571	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
2572 gbe_dev->ale_ports = gbe_dev->max_num_ports;
2573	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
2574 gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
2575 gbe_dev->et_stats = xgbe10_et_stats;
2576 gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
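	/* enable statistics on all ports: for XGBE max_num_ports is 3
	 * (2 slave ports + the host port), so this mask works out to 0x7
	 */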
2577	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2578
2579 /* Subsystem registers */
2580 XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2581 XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
2582
2583 /* Switch module registers */
2584 XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2585 XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2586 XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2587 XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2588 XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2589
2590 /* Host port registers */
2591 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2592 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2593 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2594 return 0;
2595}
2596
2597static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
2598 struct device_node *node)
2599{
2600 struct resource res;
2601 void __iomem *regs;
2602 int ret;
2603
2604	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
2605	if (ret) {
2606		dev_err(gbe_dev->dev,
2607 "Can't translate of node(%s) of gbe ss address at %d\n",
2608 node->name, GBE_SS_REG_INDEX);
2609		return ret;
2610 }
2611
2612 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2613 if (IS_ERR(regs)) {
2614 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
2615 return PTR_ERR(regs);
2616 }
2617 gbe_dev->ss_regs = regs;
2618 gbe_dev->ss_version = readl(gbe_dev->ss_regs);
2619 return 0;
2620}
2621
2622static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
2623 struct device_node *node)
2624{
2625	struct resource res;
2626	void __iomem *regs;
2627	int i, ret;
2628
2629 ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
2630 if (ret) {
2631 dev_err(gbe_dev->dev,
2632 "Can't translate of gbe node(%s) address at index %d\n",
2633 node->name, GBE_SGMII34_REG_INDEX);
2634 return ret;
2635 }
2636
2637 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2638 if (IS_ERR(regs)) {
2639 dev_err(gbe_dev->dev,
2640 "Failed to map gbe sgmii port34 register base\n");
2641 return PTR_ERR(regs);
2642 }
2643 gbe_dev->sgmii_port34_regs = regs;
2644
2645 ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
2646 if (ret) {
2647 dev_err(gbe_dev->dev,
2648 "Can't translate of gbe node(%s) address at index %d\n",
2649 node->name, GBE_SM_REG_INDEX);
2650 return ret;
2651 }
2652
2653 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2654 if (IS_ERR(regs)) {
2655 dev_err(gbe_dev->dev,
2656 "Failed to map gbe switch module register base\n");
2657 return PTR_ERR(regs);
2658 }
2659 gbe_dev->switch_regs = regs;
2660
2661 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2662 GBE13_NUM_HW_STAT_ENTRIES *
2663					 gbe_dev->max_num_slaves * sizeof(u64),
2664					 GFP_KERNEL);
2665 if (!gbe_dev->hw_stats) {
2666 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2667 return -ENOMEM;
2668 }
2669
2670	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
2671 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
2672
2673	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
2674		gbe_dev->hw_stats_regs[i] =
2675 gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
2676 (GBE_HW_STATS_REG_MAP_SZ * i);
2677 }
2678
2679	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
2680	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2681	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
2682 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2683 gbe_dev->et_stats = gbe13_et_stats;
2684 gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
2685	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
2686
2687 /* Subsystem registers */
2688 GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2689
2690 /* Switch module registers */
2691 GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2692 GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2693 GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
2694 GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2695 GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2696 GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2697
2698 /* Host port registers */
2699 GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2700 GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2701 return 0;
2702}
2703
2704static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2705 struct device_node *node)
2706{
2707 struct resource res;
2708 void __iomem *regs;
2709 int i, ret;
2710
2711 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2712 GBENU_NUM_HW_STAT_ENTRIES *
2713 (gbe_dev->max_num_ports) * sizeof(u64),
2714 GFP_KERNEL);
2715 if (!gbe_dev->hw_stats) {
2716 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2717 return -ENOMEM;
2718 }
2719
2720 ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
2721 if (ret) {
2722 dev_err(gbe_dev->dev,
2723 "Can't translate of gbenu node(%s) addr at index %d\n",
2724 node->name, GBENU_SM_REG_INDEX);
2725 return ret;
2726 }
2727
2728 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2729 if (IS_ERR(regs)) {
2730 dev_err(gbe_dev->dev,
2731 "Failed to map gbenu switch module register base\n");
2732 return PTR_ERR(regs);
2733 }
2734 gbe_dev->switch_regs = regs;
2735
2736 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2737 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
2738
2739 for (i = 0; i < (gbe_dev->max_num_ports); i++)
2740 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2741 GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
2742
2743 gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
2744 gbe_dev->ale_ports = gbe_dev->max_num_ports;
2745 gbe_dev->host_port = GBENU_HOST_PORT_NUM;
2746 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2747 gbe_dev->et_stats = gbenu_et_stats;
2748 gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2749
2750 if (IS_SS_ID_NU(gbe_dev))
2751 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2752 (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
2753 else
2754 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2755 GBENU_ET_STATS_PORT_SIZE;
2756
2757 /* Subsystem registers */
2758 GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2759
2760 /* Switch module registers */
2761 GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2762 GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
2763 GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2764 GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2765
2766 /* Host port registers */
2767 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2768 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2769
2770 /* For NU only. 2U does not need tx_pri_map.
2771 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
2772 * while 2U has only 1 such thread
2773 */
2774 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2775 return 0;
2776}
2777
2778static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2779 struct device_node *node, void **inst_priv)
2780{
2781 struct device_node *interfaces, *interface;
2782 struct device_node *secondary_ports;
2783 struct cpsw_ale_params ale_params;
2784 struct gbe_priv *gbe_dev;
2785 u32 slave_num;
2786 int ret = 0;
2787
2788 if (!node) {
2789 dev_err(dev, "device tree info unavailable\n");
2790 return -ENODEV;
2791 }
2792
2793 gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
2794 if (!gbe_dev)
2795 return -ENOMEM;
2796
2797	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
2798 of_device_is_compatible(node, "ti,netcp-gbe")) {
2799 gbe_dev->max_num_slaves = 4;
2800 } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
2801 gbe_dev->max_num_slaves = 8;
2802 } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
2803 gbe_dev->max_num_slaves = 1;
2804 } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
2805 gbe_dev->max_num_slaves = 2;
2806 } else {
2807 dev_err(dev, "device tree node for unknown device\n");
2808 return -EINVAL;
2809 }
2810 gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
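	/* e.g. a "ti,netcp-gbe-9" node gives max_num_slaves = 8 and
	 * max_num_ports = 9 (eight slave ports plus the host port)
	 */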
2811
2812	gbe_dev->dev = dev;
2813 gbe_dev->netcp_device = netcp_device;
2814 gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
2815
2816 /* init the hw stats lock */
2817 spin_lock_init(&gbe_dev->hw_stats_lock);
2818
2819 if (of_find_property(node, "enable-ale", NULL)) {
2820 gbe_dev->enable_ale = true;
2821 dev_info(dev, "ALE enabled\n");
2822 } else {
2823 gbe_dev->enable_ale = false;
2824		dev_dbg(dev, "ALE bypass enabled\n");
2825 }
2826
2827 ret = of_property_read_u32(node, "tx-queue",
2828 &gbe_dev->tx_queue_id);
2829 if (ret < 0) {
2830 dev_err(dev, "missing tx_queue parameter\n");
2831 gbe_dev->tx_queue_id = GBE_TX_QUEUE;
2832 }
2833
2834 ret = of_property_read_string(node, "tx-channel",
2835 &gbe_dev->dma_chan_name);
2836 if (ret < 0) {
2837 dev_err(dev, "missing \"tx-channel\" parameter\n");
2838 ret = -ENODEV;
2839 goto quit;
2840 }
2841
2842 if (!strcmp(node->name, "gbe")) {
2843 ret = get_gbe_resource_version(gbe_dev, node);
2844 if (ret)
2845 goto quit;
2846
2847		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2848
2849 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2850 ret = set_gbe_ethss14_priv(gbe_dev, node);
2851 else if (IS_SS_ID_MU(gbe_dev))
2852 ret = set_gbenu_ethss_priv(gbe_dev, node);
2853 else
2854 ret = -ENODEV;
2855
2856		if (ret)
2857 goto quit;
2858	} else if (!strcmp(node->name, "xgbe")) {
2859 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2860 if (ret)
2861 goto quit;
2862 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2863 gbe_dev->ss_regs);
2864 if (ret)
2865 goto quit;
2866	} else {
2867 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2868 ret = -ENODEV;
2869 goto quit;
2870 }
2871
2872 interfaces = of_get_child_by_name(node, "interfaces");
2873 if (!interfaces)
2874 dev_err(dev, "could not find interfaces\n");
2875
2876 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2877 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2878 if (ret)
2879 goto quit;
2880
2881 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2882 if (ret)
2883 goto quit;
2884
2885 /* Create network interfaces */
2886 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
2887 for_each_child_of_node(interfaces, interface) {
2888 ret = of_property_read_u32(interface, "slave-port", &slave_num);
2889 if (ret) {
2890 dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
2891 interface->name);
2892 continue;
2893 }
2894 gbe_dev->num_slaves++;
2895		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2896 break;
2897	}
2898
2899 if (!gbe_dev->num_slaves)
2900 dev_warn(dev, "No network interface configured\n");
2901
2902 /* Initialize Secondary slave ports */
2903 secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
2904 INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
2905	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
2906		init_secondary_ports(gbe_dev, secondary_ports);
2907 of_node_put(secondary_ports);
2908
2909 if (!gbe_dev->num_slaves) {
2910 dev_err(dev, "No network interface or secondary ports configured\n");
2911 ret = -ENODEV;
2912 goto quit;
2913 }
2914
2915 memset(&ale_params, 0, sizeof(ale_params));
2916 ale_params.dev = gbe_dev->dev;
2917 ale_params.ale_regs = gbe_dev->ale_reg;
2918 ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
2919 ale_params.ale_entries = gbe_dev->ale_entries;
2920 ale_params.ale_ports = gbe_dev->ale_ports;
2921
2922 gbe_dev->ale = cpsw_ale_create(&ale_params);
2923 if (!gbe_dev->ale) {
2924 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2925 ret = -ENODEV;
2926 goto quit;
2927 } else {
2928 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2929 }
2930
2931 /* initialize host port */
2932 gbe_init_host_port(gbe_dev);
2933
2934 init_timer(&gbe_dev->timer);
2935 gbe_dev->timer.data = (unsigned long)gbe_dev;
2936 gbe_dev->timer.function = netcp_ethss_timer;
2937 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
2938 add_timer(&gbe_dev->timer);
2939 *inst_priv = gbe_dev;
2940 return 0;
2941
2942quit:
2943 if (gbe_dev->hw_stats)
2944 devm_kfree(dev, gbe_dev->hw_stats);
2945	cpsw_ale_destroy(gbe_dev->ale);
2946	if (gbe_dev->ss_regs)
2947 devm_iounmap(dev, gbe_dev->ss_regs);
2948	of_node_put(interfaces);
2949	devm_kfree(dev, gbe_dev);
2950 return ret;
2951}
2952
2953static int gbe_attach(void *inst_priv, struct net_device *ndev,
2954 struct device_node *node, void **intf_priv)
2955{
2956 struct gbe_priv *gbe_dev = inst_priv;
2957 struct gbe_intf *gbe_intf;
2958 int ret;
2959
2960 if (!node) {
2961 dev_err(gbe_dev->dev, "interface node not available\n");
2962 return -ENODEV;
2963 }
2964
2965 gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
2966 if (!gbe_intf)
2967 return -ENOMEM;
2968
2969 gbe_intf->ndev = ndev;
2970 gbe_intf->dev = gbe_dev->dev;
2971 gbe_intf->gbe_dev = gbe_dev;
2972
2973 gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
2974 sizeof(*gbe_intf->slave),
2975 GFP_KERNEL);
2976 if (!gbe_intf->slave) {
2977 ret = -ENOMEM;
2978 goto fail;
2979 }
2980
2981 if (init_slave(gbe_dev, gbe_intf->slave, node)) {
2982 ret = -ENODEV;
2983 goto fail;
2984 }
2985
2986 gbe_intf->tx_pipe = gbe_dev->tx_pipe;
2987 ndev->ethtool_ops = &keystone_ethtool_ops;
2988 list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
2989 *intf_priv = gbe_intf;
2990 return 0;
2991
2992fail:
2993 if (gbe_intf->slave)
2994 devm_kfree(gbe_dev->dev, gbe_intf->slave);
2995 if (gbe_intf)
2996 devm_kfree(gbe_dev->dev, gbe_intf);
2997 return ret;
2998}
2999
3000static int gbe_release(void *intf_priv)
3001{
3002 struct gbe_intf *gbe_intf = intf_priv;
3003
3004 gbe_intf->ndev->ethtool_ops = NULL;
3005 list_del(&gbe_intf->gbe_intf_list);
3006 devm_kfree(gbe_intf->dev, gbe_intf->slave);
3007 devm_kfree(gbe_intf->dev, gbe_intf);
3008 return 0;
3009}
3010
3011static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3012{
3013 struct gbe_priv *gbe_dev = inst_priv;
3014
3015 del_timer_sync(&gbe_dev->timer);
3016 cpsw_ale_stop(gbe_dev->ale);
3017 cpsw_ale_destroy(gbe_dev->ale);
3018 netcp_txpipe_close(&gbe_dev->tx_pipe);
3019 free_secondary_ports(gbe_dev);
3020
3021 if (!list_empty(&gbe_dev->gbe_intf_head))
3022 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
3023
3024 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3025 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3026 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3027 devm_kfree(gbe_dev->dev, gbe_dev);
3028 return 0;
3029}
3030
3031static struct netcp_module gbe_module = {
3032 .name = GBE_MODULE_NAME,
3033 .owner = THIS_MODULE,
3034 .primary = true,
3035 .probe = gbe_probe,
3036 .open = gbe_open,
3037 .close = gbe_close,
3038 .remove = gbe_remove,
3039 .attach = gbe_attach,
3040 .release = gbe_release,
3041 .add_addr = gbe_add_addr,
3042 .del_addr = gbe_del_addr,
3043 .add_vid = gbe_add_vid,
3044 .del_vid = gbe_del_vid,
3045 .ioctl = gbe_ioctl,
3046};
3047
3048static struct netcp_module xgbe_module = {
3049 .name = XGBE_MODULE_NAME,
3050 .owner = THIS_MODULE,
3051 .primary = true,
3052 .probe = gbe_probe,
3053 .open = gbe_open,
3054 .close = gbe_close,
3055 .remove = gbe_remove,
3056 .attach = gbe_attach,
3057 .release = gbe_release,
3058 .add_addr = gbe_add_addr,
3059 .del_addr = gbe_del_addr,
3060 .add_vid = gbe_add_vid,
3061 .del_vid = gbe_del_vid,
3062 .ioctl = gbe_ioctl,
3063};
3064
3065static int __init keystone_gbe_init(void)
3066{
3067 int ret;
3068
3069 ret = netcp_register_module(&gbe_module);
3070 if (ret)
3071 return ret;
3072
3073	ret = netcp_register_module(&xgbe_module);
3074 if (ret)
3075 return ret;
3076
3077	return 0;
3078}
3079module_init(keystone_gbe_init);
3080
3081static void __exit keystone_gbe_exit(void)
3082{
3083 netcp_unregister_module(&gbe_module);
3084	netcp_unregister_module(&xgbe_module);
3085}
3086module_exit(keystone_gbe_exit);
3087
3088MODULE_LICENSE("GPL v2");
3089MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3090MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");