blob: fa1041a78b46ba087b19c6d3c4388727e26b9983 [file] [log] [blame]
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001/*
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002 * Keystone GBE and XGBE subsystem code
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05003 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation version 2.
14 *
15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16 * kind, whether express or implied; without even the implied warranty
17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/io.h>
22#include <linux/of_mdio.h>
23#include <linux/of_address.h>
24#include <linux/if_vlan.h>
25#include <linux/ethtool.h>
26
27#include "cpsw_ale.h"
28#include "netcp.h"
29
30#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
31#define NETCP_DRIVER_VERSION "v1.0"
32
33#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
34#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
35#define GBE_MINOR_VERSION(reg) (reg & 0xff)
36#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
37
38/* 1G Ethernet SS defines */
39#define GBE_MODULE_NAME "netcp-gbe"
40#define GBE_SS_VERSION_14 0x4ed21104
41
42#define GBE13_SGMII_MODULE_OFFSET 0x100
43#define GBE13_SGMII34_MODULE_OFFSET 0x400
44#define GBE13_SWITCH_MODULE_OFFSET 0x800
45#define GBE13_HOST_PORT_OFFSET 0x834
46#define GBE13_SLAVE_PORT_OFFSET 0x860
47#define GBE13_EMAC_OFFSET 0x900
48#define GBE13_SLAVE_PORT2_OFFSET 0xa00
49#define GBE13_HW_STATS_OFFSET 0xb00
50#define GBE13_ALE_OFFSET 0xe00
51#define GBE13_HOST_PORT_NUM 0
52#define GBE13_NUM_SLAVES 4
53#define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1)
54#define GBE13_NUM_ALE_ENTRIES 1024
55
Wingman Kwok90cff9e2015-01-15 19:12:52 -050056/* 10G Ethernet SS defines */
57#define XGBE_MODULE_NAME "netcp-xgbe"
58#define XGBE_SS_VERSION_10 0x4ee42100
59
60#define XGBE_SERDES_REG_INDEX 1
61#define XGBE10_SGMII_MODULE_OFFSET 0x100
62#define XGBE10_SWITCH_MODULE_OFFSET 0x1000
63#define XGBE10_HOST_PORT_OFFSET 0x1034
64#define XGBE10_SLAVE_PORT_OFFSET 0x1064
65#define XGBE10_EMAC_OFFSET 0x1400
66#define XGBE10_ALE_OFFSET 0x1700
67#define XGBE10_HW_STATS_OFFSET 0x1800
68#define XGBE10_HOST_PORT_NUM 0
69#define XGBE10_NUM_SLAVES 2
70#define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1)
71#define XGBE10_NUM_ALE_ENTRIES 1024
72
Wingman Kwok6f8d3f32015-01-15 19:12:51 -050073#define GBE_TIMER_INTERVAL (HZ / 2)
74
75/* Soft reset register values */
76#define SOFT_RESET_MASK BIT(0)
77#define SOFT_RESET BIT(0)
78#define DEVICE_EMACSL_RESET_POLL_COUNT 100
79#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
80
81#define MACSL_RX_ENABLE_CSF BIT(23)
82#define MACSL_ENABLE_EXT_CTL BIT(18)
Wingman Kwok90cff9e2015-01-15 19:12:52 -050083#define MACSL_XGMII_ENABLE BIT(13)
84#define MACSL_XGIG_MODE BIT(8)
Wingman Kwok6f8d3f32015-01-15 19:12:51 -050085#define MACSL_GIG_MODE BIT(7)
86#define MACSL_GMII_ENABLE BIT(5)
87#define MACSL_FULLDUPLEX BIT(0)
88
89#define GBE_CTL_P0_ENABLE BIT(2)
90#define GBE_REG_VAL_STAT_ENABLE_ALL 0xff
Wingman Kwok90cff9e2015-01-15 19:12:52 -050091#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
Wingman Kwok6f8d3f32015-01-15 19:12:51 -050092#define GBE_STATS_CD_SEL BIT(28)
93
94#define GBE_PORT_MASK(x) (BIT(x) - 1)
95#define GBE_MASK_NO_PORTS 0
96
97#define GBE_DEF_1G_MAC_CONTROL \
98 (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
99 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
100
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500101#define GBE_DEF_10G_MAC_CONTROL \
102 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
103 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
104
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500105#define GBE_STATSA_MODULE 0
106#define GBE_STATSB_MODULE 1
107#define GBE_STATSC_MODULE 2
108#define GBE_STATSD_MODULE 3
109
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500110#define XGBE_STATS0_MODULE 0
111#define XGBE_STATS1_MODULE 1
112#define XGBE_STATS2_MODULE 2
113
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500114#define MAX_SLAVES GBE13_NUM_SLAVES
115/* s: 0-based slave_port */
116#define SGMII_BASE(s) \
117 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
118
119#define GBE_TX_QUEUE 648
120#define GBE_TXHOOK_ORDER 0
121#define GBE_DEFAULT_ALE_AGEOUT 30
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500122#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500123#define NETCP_LINK_STATE_INVALID -1
124
125#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
126 offsetof(struct gbe##_##rb, rn)
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500127#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
128 offsetof(struct xgbe##_##rb, rn)
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500129#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
130
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500131struct xgbe_ss_regs {
132 u32 id_ver;
133 u32 synce_count;
134 u32 synce_mux;
135 u32 control;
136};
137
138struct xgbe_switch_regs {
139 u32 id_ver;
140 u32 control;
141 u32 emcontrol;
142 u32 stat_port_en;
143 u32 ptype;
144 u32 soft_idle;
145 u32 thru_rate;
146 u32 gap_thresh;
147 u32 tx_start_wds;
148 u32 flow_control;
149 u32 cppi_thresh;
150};
151
152struct xgbe_port_regs {
153 u32 blk_cnt;
154 u32 port_vlan;
155 u32 tx_pri_map;
156 u32 sa_lo;
157 u32 sa_hi;
158 u32 ts_ctl;
159 u32 ts_seq_ltype;
160 u32 ts_vlan;
161 u32 ts_ctl_ltype2;
162 u32 ts_ctl2;
163 u32 control;
164};
165
166struct xgbe_host_port_regs {
167 u32 blk_cnt;
168 u32 port_vlan;
169 u32 tx_pri_map;
170 u32 src_id;
171 u32 rx_pri_map;
172 u32 rx_maxlen;
173};
174
175struct xgbe_emac_regs {
176 u32 id_ver;
177 u32 mac_control;
178 u32 mac_status;
179 u32 soft_reset;
180 u32 rx_maxlen;
181 u32 __reserved_0;
182 u32 rx_pause;
183 u32 tx_pause;
184 u32 em_control;
185 u32 __reserved_1;
186 u32 tx_gap;
187 u32 rsvd[4];
188};
189
190struct xgbe_host_hw_stats {
191 u32 rx_good_frames;
192 u32 rx_broadcast_frames;
193 u32 rx_multicast_frames;
194 u32 __rsvd_0[3];
195 u32 rx_oversized_frames;
196 u32 __rsvd_1;
197 u32 rx_undersized_frames;
198 u32 __rsvd_2;
199 u32 overrun_type4;
200 u32 overrun_type5;
201 u32 rx_bytes;
202 u32 tx_good_frames;
203 u32 tx_broadcast_frames;
204 u32 tx_multicast_frames;
205 u32 __rsvd_3[9];
206 u32 tx_bytes;
207 u32 tx_64byte_frames;
208 u32 tx_65_to_127byte_frames;
209 u32 tx_128_to_255byte_frames;
210 u32 tx_256_to_511byte_frames;
211 u32 tx_512_to_1023byte_frames;
212 u32 tx_1024byte_frames;
213 u32 net_bytes;
214 u32 rx_sof_overruns;
215 u32 rx_mof_overruns;
216 u32 rx_dma_overruns;
217};
218
219struct xgbe_hw_stats {
220 u32 rx_good_frames;
221 u32 rx_broadcast_frames;
222 u32 rx_multicast_frames;
223 u32 rx_pause_frames;
224 u32 rx_crc_errors;
225 u32 rx_align_code_errors;
226 u32 rx_oversized_frames;
227 u32 rx_jabber_frames;
228 u32 rx_undersized_frames;
229 u32 rx_fragments;
230 u32 overrun_type4;
231 u32 overrun_type5;
232 u32 rx_bytes;
233 u32 tx_good_frames;
234 u32 tx_broadcast_frames;
235 u32 tx_multicast_frames;
236 u32 tx_pause_frames;
237 u32 tx_deferred_frames;
238 u32 tx_collision_frames;
239 u32 tx_single_coll_frames;
240 u32 tx_mult_coll_frames;
241 u32 tx_excessive_collisions;
242 u32 tx_late_collisions;
243 u32 tx_underrun;
244 u32 tx_carrier_sense_errors;
245 u32 tx_bytes;
246 u32 tx_64byte_frames;
247 u32 tx_65_to_127byte_frames;
248 u32 tx_128_to_255byte_frames;
249 u32 tx_256_to_511byte_frames;
250 u32 tx_512_to_1023byte_frames;
251 u32 tx_1024byte_frames;
252 u32 net_bytes;
253 u32 rx_sof_overruns;
254 u32 rx_mof_overruns;
255 u32 rx_dma_overruns;
256};
257
258#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
259
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500260struct gbe_ss_regs {
261 u32 id_ver;
262 u32 synce_count;
263 u32 synce_mux;
264};
265
266struct gbe_ss_regs_ofs {
267 u16 id_ver;
268 u16 control;
269};
270
271struct gbe_switch_regs {
272 u32 id_ver;
273 u32 control;
274 u32 soft_reset;
275 u32 stat_port_en;
276 u32 ptype;
277 u32 soft_idle;
278 u32 thru_rate;
279 u32 gap_thresh;
280 u32 tx_start_wds;
281 u32 flow_control;
282};
283
284struct gbe_switch_regs_ofs {
285 u16 id_ver;
286 u16 control;
287 u16 soft_reset;
288 u16 emcontrol;
289 u16 stat_port_en;
290 u16 ptype;
291 u16 flow_control;
292};
293
294struct gbe_port_regs {
295 u32 max_blks;
296 u32 blk_cnt;
297 u32 port_vlan;
298 u32 tx_pri_map;
299 u32 sa_lo;
300 u32 sa_hi;
301 u32 ts_ctl;
302 u32 ts_seq_ltype;
303 u32 ts_vlan;
304 u32 ts_ctl_ltype2;
305 u32 ts_ctl2;
306};
307
308struct gbe_port_regs_ofs {
309 u16 port_vlan;
310 u16 tx_pri_map;
311 u16 sa_lo;
312 u16 sa_hi;
313 u16 ts_ctl;
314 u16 ts_seq_ltype;
315 u16 ts_vlan;
316 u16 ts_ctl_ltype2;
317 u16 ts_ctl2;
318};
319
320struct gbe_host_port_regs {
321 u32 src_id;
322 u32 port_vlan;
323 u32 rx_pri_map;
324 u32 rx_maxlen;
325};
326
327struct gbe_host_port_regs_ofs {
328 u16 port_vlan;
329 u16 tx_pri_map;
330 u16 rx_maxlen;
331};
332
333struct gbe_emac_regs {
334 u32 id_ver;
335 u32 mac_control;
336 u32 mac_status;
337 u32 soft_reset;
338 u32 rx_maxlen;
339 u32 __reserved_0;
340 u32 rx_pause;
341 u32 tx_pause;
342 u32 __reserved_1;
343 u32 rx_pri_map;
344 u32 rsvd[6];
345};
346
347struct gbe_emac_regs_ofs {
348 u16 mac_control;
349 u16 soft_reset;
350 u16 rx_maxlen;
351};
352
353struct gbe_hw_stats {
354 u32 rx_good_frames;
355 u32 rx_broadcast_frames;
356 u32 rx_multicast_frames;
357 u32 rx_pause_frames;
358 u32 rx_crc_errors;
359 u32 rx_align_code_errors;
360 u32 rx_oversized_frames;
361 u32 rx_jabber_frames;
362 u32 rx_undersized_frames;
363 u32 rx_fragments;
364 u32 __pad_0[2];
365 u32 rx_bytes;
366 u32 tx_good_frames;
367 u32 tx_broadcast_frames;
368 u32 tx_multicast_frames;
369 u32 tx_pause_frames;
370 u32 tx_deferred_frames;
371 u32 tx_collision_frames;
372 u32 tx_single_coll_frames;
373 u32 tx_mult_coll_frames;
374 u32 tx_excessive_collisions;
375 u32 tx_late_collisions;
376 u32 tx_underrun;
377 u32 tx_carrier_sense_errors;
378 u32 tx_bytes;
379 u32 tx_64byte_frames;
380 u32 tx_65_to_127byte_frames;
381 u32 tx_128_to_255byte_frames;
382 u32 tx_256_to_511byte_frames;
383 u32 tx_512_to_1023byte_frames;
384 u32 tx_1024byte_frames;
385 u32 net_bytes;
386 u32 rx_sof_overruns;
387 u32 rx_mof_overruns;
388 u32 rx_dma_overruns;
389};
390
391#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
392#define GBE13_NUM_HW_STATS_MOD 2
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500393#define XGBE10_NUM_HW_STATS_MOD 3
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500394#define GBE_MAX_HW_STAT_MODS 3
395#define GBE_HW_STATS_REG_MAP_SZ 0x100
396
397struct gbe_slave {
398 void __iomem *port_regs;
399 void __iomem *emac_regs;
400 struct gbe_port_regs_ofs port_regs_ofs;
401 struct gbe_emac_regs_ofs emac_regs_ofs;
402 int slave_num; /* 0 based logical number */
403 int port_num; /* actual port number */
404 atomic_t link_state;
405 bool open;
406 struct phy_device *phy;
407 u32 link_interface;
408 u32 mac_control;
409 u8 phy_port_t;
410 struct device_node *phy_node;
411 struct list_head slave_list;
412};
413
414struct gbe_priv {
415 struct device *dev;
416 struct netcp_device *netcp_device;
417 struct timer_list timer;
418 u32 num_slaves;
419 u32 ale_entries;
420 u32 ale_ports;
421 bool enable_ale;
422 struct netcp_tx_pipe tx_pipe;
423
424 int host_port;
425 u32 rx_packet_max;
426 u32 ss_version;
427
428 void __iomem *ss_regs;
429 void __iomem *switch_regs;
430 void __iomem *host_port_regs;
431 void __iomem *ale_reg;
432 void __iomem *sgmii_port_regs;
433 void __iomem *sgmii_port34_regs;
434 void __iomem *xgbe_serdes_regs;
435 void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
436
437 struct gbe_ss_regs_ofs ss_regs_ofs;
438 struct gbe_switch_regs_ofs switch_regs_ofs;
439 struct gbe_host_port_regs_ofs host_port_regs_ofs;
440
441 struct cpsw_ale *ale;
442 unsigned int tx_queue_id;
443 const char *dma_chan_name;
444
445 struct list_head gbe_intf_head;
446 struct list_head secondary_slaves;
447 struct net_device *dummy_ndev;
448
449 u64 *hw_stats;
450 const struct netcp_ethtool_stat *et_stats;
451 int num_et_stats;
452 /* Lock for updating the hwstats */
453 spinlock_t hw_stats_lock;
454};
455
456struct gbe_intf {
457 struct net_device *ndev;
458 struct device *dev;
459 struct gbe_priv *gbe_dev;
460 struct netcp_tx_pipe tx_pipe;
461 struct gbe_slave *slave;
462 struct list_head gbe_intf_list;
463 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
464};
465
466static struct netcp_module gbe_module;
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500467static struct netcp_module xgbe_module;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500468
469/* Statistic management */
470struct netcp_ethtool_stat {
471 char desc[ETH_GSTRING_LEN];
472 int type;
473 u32 size;
474 int offset;
475};
476
477#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\
478 FIELD_SIZEOF(struct gbe_hw_stats, field), \
479 offsetof(struct gbe_hw_stats, field)
480
481#define GBE_STATSB_INFO(field) "GBE_B:"#field, GBE_STATSB_MODULE,\
482 FIELD_SIZEOF(struct gbe_hw_stats, field), \
483 offsetof(struct gbe_hw_stats, field)
484
485#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\
486 FIELD_SIZEOF(struct gbe_hw_stats, field), \
487 offsetof(struct gbe_hw_stats, field)
488
489#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\
490 FIELD_SIZEOF(struct gbe_hw_stats, field), \
491 offsetof(struct gbe_hw_stats, field)
492
493static const struct netcp_ethtool_stat gbe13_et_stats[] = {
494 /* GBE module A */
495 {GBE_STATSA_INFO(rx_good_frames)},
496 {GBE_STATSA_INFO(rx_broadcast_frames)},
497 {GBE_STATSA_INFO(rx_multicast_frames)},
498 {GBE_STATSA_INFO(rx_pause_frames)},
499 {GBE_STATSA_INFO(rx_crc_errors)},
500 {GBE_STATSA_INFO(rx_align_code_errors)},
501 {GBE_STATSA_INFO(rx_oversized_frames)},
502 {GBE_STATSA_INFO(rx_jabber_frames)},
503 {GBE_STATSA_INFO(rx_undersized_frames)},
504 {GBE_STATSA_INFO(rx_fragments)},
505 {GBE_STATSA_INFO(rx_bytes)},
506 {GBE_STATSA_INFO(tx_good_frames)},
507 {GBE_STATSA_INFO(tx_broadcast_frames)},
508 {GBE_STATSA_INFO(tx_multicast_frames)},
509 {GBE_STATSA_INFO(tx_pause_frames)},
510 {GBE_STATSA_INFO(tx_deferred_frames)},
511 {GBE_STATSA_INFO(tx_collision_frames)},
512 {GBE_STATSA_INFO(tx_single_coll_frames)},
513 {GBE_STATSA_INFO(tx_mult_coll_frames)},
514 {GBE_STATSA_INFO(tx_excessive_collisions)},
515 {GBE_STATSA_INFO(tx_late_collisions)},
516 {GBE_STATSA_INFO(tx_underrun)},
517 {GBE_STATSA_INFO(tx_carrier_sense_errors)},
518 {GBE_STATSA_INFO(tx_bytes)},
519 {GBE_STATSA_INFO(tx_64byte_frames)},
520 {GBE_STATSA_INFO(tx_65_to_127byte_frames)},
521 {GBE_STATSA_INFO(tx_128_to_255byte_frames)},
522 {GBE_STATSA_INFO(tx_256_to_511byte_frames)},
523 {GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
524 {GBE_STATSA_INFO(tx_1024byte_frames)},
525 {GBE_STATSA_INFO(net_bytes)},
526 {GBE_STATSA_INFO(rx_sof_overruns)},
527 {GBE_STATSA_INFO(rx_mof_overruns)},
528 {GBE_STATSA_INFO(rx_dma_overruns)},
529 /* GBE module B */
530 {GBE_STATSB_INFO(rx_good_frames)},
531 {GBE_STATSB_INFO(rx_broadcast_frames)},
532 {GBE_STATSB_INFO(rx_multicast_frames)},
533 {GBE_STATSB_INFO(rx_pause_frames)},
534 {GBE_STATSB_INFO(rx_crc_errors)},
535 {GBE_STATSB_INFO(rx_align_code_errors)},
536 {GBE_STATSB_INFO(rx_oversized_frames)},
537 {GBE_STATSB_INFO(rx_jabber_frames)},
538 {GBE_STATSB_INFO(rx_undersized_frames)},
539 {GBE_STATSB_INFO(rx_fragments)},
540 {GBE_STATSB_INFO(rx_bytes)},
541 {GBE_STATSB_INFO(tx_good_frames)},
542 {GBE_STATSB_INFO(tx_broadcast_frames)},
543 {GBE_STATSB_INFO(tx_multicast_frames)},
544 {GBE_STATSB_INFO(tx_pause_frames)},
545 {GBE_STATSB_INFO(tx_deferred_frames)},
546 {GBE_STATSB_INFO(tx_collision_frames)},
547 {GBE_STATSB_INFO(tx_single_coll_frames)},
548 {GBE_STATSB_INFO(tx_mult_coll_frames)},
549 {GBE_STATSB_INFO(tx_excessive_collisions)},
550 {GBE_STATSB_INFO(tx_late_collisions)},
551 {GBE_STATSB_INFO(tx_underrun)},
552 {GBE_STATSB_INFO(tx_carrier_sense_errors)},
553 {GBE_STATSB_INFO(tx_bytes)},
554 {GBE_STATSB_INFO(tx_64byte_frames)},
555 {GBE_STATSB_INFO(tx_65_to_127byte_frames)},
556 {GBE_STATSB_INFO(tx_128_to_255byte_frames)},
557 {GBE_STATSB_INFO(tx_256_to_511byte_frames)},
558 {GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
559 {GBE_STATSB_INFO(tx_1024byte_frames)},
560 {GBE_STATSB_INFO(net_bytes)},
561 {GBE_STATSB_INFO(rx_sof_overruns)},
562 {GBE_STATSB_INFO(rx_mof_overruns)},
563 {GBE_STATSB_INFO(rx_dma_overruns)},
564 /* GBE module C */
565 {GBE_STATSC_INFO(rx_good_frames)},
566 {GBE_STATSC_INFO(rx_broadcast_frames)},
567 {GBE_STATSC_INFO(rx_multicast_frames)},
568 {GBE_STATSC_INFO(rx_pause_frames)},
569 {GBE_STATSC_INFO(rx_crc_errors)},
570 {GBE_STATSC_INFO(rx_align_code_errors)},
571 {GBE_STATSC_INFO(rx_oversized_frames)},
572 {GBE_STATSC_INFO(rx_jabber_frames)},
573 {GBE_STATSC_INFO(rx_undersized_frames)},
574 {GBE_STATSC_INFO(rx_fragments)},
575 {GBE_STATSC_INFO(rx_bytes)},
576 {GBE_STATSC_INFO(tx_good_frames)},
577 {GBE_STATSC_INFO(tx_broadcast_frames)},
578 {GBE_STATSC_INFO(tx_multicast_frames)},
579 {GBE_STATSC_INFO(tx_pause_frames)},
580 {GBE_STATSC_INFO(tx_deferred_frames)},
581 {GBE_STATSC_INFO(tx_collision_frames)},
582 {GBE_STATSC_INFO(tx_single_coll_frames)},
583 {GBE_STATSC_INFO(tx_mult_coll_frames)},
584 {GBE_STATSC_INFO(tx_excessive_collisions)},
585 {GBE_STATSC_INFO(tx_late_collisions)},
586 {GBE_STATSC_INFO(tx_underrun)},
587 {GBE_STATSC_INFO(tx_carrier_sense_errors)},
588 {GBE_STATSC_INFO(tx_bytes)},
589 {GBE_STATSC_INFO(tx_64byte_frames)},
590 {GBE_STATSC_INFO(tx_65_to_127byte_frames)},
591 {GBE_STATSC_INFO(tx_128_to_255byte_frames)},
592 {GBE_STATSC_INFO(tx_256_to_511byte_frames)},
593 {GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
594 {GBE_STATSC_INFO(tx_1024byte_frames)},
595 {GBE_STATSC_INFO(net_bytes)},
596 {GBE_STATSC_INFO(rx_sof_overruns)},
597 {GBE_STATSC_INFO(rx_mof_overruns)},
598 {GBE_STATSC_INFO(rx_dma_overruns)},
599 /* GBE module D */
600 {GBE_STATSD_INFO(rx_good_frames)},
601 {GBE_STATSD_INFO(rx_broadcast_frames)},
602 {GBE_STATSD_INFO(rx_multicast_frames)},
603 {GBE_STATSD_INFO(rx_pause_frames)},
604 {GBE_STATSD_INFO(rx_crc_errors)},
605 {GBE_STATSD_INFO(rx_align_code_errors)},
606 {GBE_STATSD_INFO(rx_oversized_frames)},
607 {GBE_STATSD_INFO(rx_jabber_frames)},
608 {GBE_STATSD_INFO(rx_undersized_frames)},
609 {GBE_STATSD_INFO(rx_fragments)},
610 {GBE_STATSD_INFO(rx_bytes)},
611 {GBE_STATSD_INFO(tx_good_frames)},
612 {GBE_STATSD_INFO(tx_broadcast_frames)},
613 {GBE_STATSD_INFO(tx_multicast_frames)},
614 {GBE_STATSD_INFO(tx_pause_frames)},
615 {GBE_STATSD_INFO(tx_deferred_frames)},
616 {GBE_STATSD_INFO(tx_collision_frames)},
617 {GBE_STATSD_INFO(tx_single_coll_frames)},
618 {GBE_STATSD_INFO(tx_mult_coll_frames)},
619 {GBE_STATSD_INFO(tx_excessive_collisions)},
620 {GBE_STATSD_INFO(tx_late_collisions)},
621 {GBE_STATSD_INFO(tx_underrun)},
622 {GBE_STATSD_INFO(tx_carrier_sense_errors)},
623 {GBE_STATSD_INFO(tx_bytes)},
624 {GBE_STATSD_INFO(tx_64byte_frames)},
625 {GBE_STATSD_INFO(tx_65_to_127byte_frames)},
626 {GBE_STATSD_INFO(tx_128_to_255byte_frames)},
627 {GBE_STATSD_INFO(tx_256_to_511byte_frames)},
628 {GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
629 {GBE_STATSD_INFO(tx_1024byte_frames)},
630 {GBE_STATSD_INFO(net_bytes)},
631 {GBE_STATSD_INFO(rx_sof_overruns)},
632 {GBE_STATSD_INFO(rx_mof_overruns)},
633 {GBE_STATSD_INFO(rx_dma_overruns)},
634};
635
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500636#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \
637 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
638 offsetof(struct xgbe_hw_stats, field)
639
640#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \
641 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
642 offsetof(struct xgbe_hw_stats, field)
643
644#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \
645 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
646 offsetof(struct xgbe_hw_stats, field)
647
648static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
649 /* GBE module 0 */
650 {XGBE_STATS0_INFO(rx_good_frames)},
651 {XGBE_STATS0_INFO(rx_broadcast_frames)},
652 {XGBE_STATS0_INFO(rx_multicast_frames)},
653 {XGBE_STATS0_INFO(rx_oversized_frames)},
654 {XGBE_STATS0_INFO(rx_undersized_frames)},
655 {XGBE_STATS0_INFO(overrun_type4)},
656 {XGBE_STATS0_INFO(overrun_type5)},
657 {XGBE_STATS0_INFO(rx_bytes)},
658 {XGBE_STATS0_INFO(tx_good_frames)},
659 {XGBE_STATS0_INFO(tx_broadcast_frames)},
660 {XGBE_STATS0_INFO(tx_multicast_frames)},
661 {XGBE_STATS0_INFO(tx_bytes)},
662 {XGBE_STATS0_INFO(tx_64byte_frames)},
663 {XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
664 {XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
665 {XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
666 {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
667 {XGBE_STATS0_INFO(tx_1024byte_frames)},
668 {XGBE_STATS0_INFO(net_bytes)},
669 {XGBE_STATS0_INFO(rx_sof_overruns)},
670 {XGBE_STATS0_INFO(rx_mof_overruns)},
671 {XGBE_STATS0_INFO(rx_dma_overruns)},
672 /* XGBE module 1 */
673 {XGBE_STATS1_INFO(rx_good_frames)},
674 {XGBE_STATS1_INFO(rx_broadcast_frames)},
675 {XGBE_STATS1_INFO(rx_multicast_frames)},
676 {XGBE_STATS1_INFO(rx_pause_frames)},
677 {XGBE_STATS1_INFO(rx_crc_errors)},
678 {XGBE_STATS1_INFO(rx_align_code_errors)},
679 {XGBE_STATS1_INFO(rx_oversized_frames)},
680 {XGBE_STATS1_INFO(rx_jabber_frames)},
681 {XGBE_STATS1_INFO(rx_undersized_frames)},
682 {XGBE_STATS1_INFO(rx_fragments)},
683 {XGBE_STATS1_INFO(overrun_type4)},
684 {XGBE_STATS1_INFO(overrun_type5)},
685 {XGBE_STATS1_INFO(rx_bytes)},
686 {XGBE_STATS1_INFO(tx_good_frames)},
687 {XGBE_STATS1_INFO(tx_broadcast_frames)},
688 {XGBE_STATS1_INFO(tx_multicast_frames)},
689 {XGBE_STATS1_INFO(tx_pause_frames)},
690 {XGBE_STATS1_INFO(tx_deferred_frames)},
691 {XGBE_STATS1_INFO(tx_collision_frames)},
692 {XGBE_STATS1_INFO(tx_single_coll_frames)},
693 {XGBE_STATS1_INFO(tx_mult_coll_frames)},
694 {XGBE_STATS1_INFO(tx_excessive_collisions)},
695 {XGBE_STATS1_INFO(tx_late_collisions)},
696 {XGBE_STATS1_INFO(tx_underrun)},
697 {XGBE_STATS1_INFO(tx_carrier_sense_errors)},
698 {XGBE_STATS1_INFO(tx_bytes)},
699 {XGBE_STATS1_INFO(tx_64byte_frames)},
700 {XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
701 {XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
702 {XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
703 {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
704 {XGBE_STATS1_INFO(tx_1024byte_frames)},
705 {XGBE_STATS1_INFO(net_bytes)},
706 {XGBE_STATS1_INFO(rx_sof_overruns)},
707 {XGBE_STATS1_INFO(rx_mof_overruns)},
708 {XGBE_STATS1_INFO(rx_dma_overruns)},
709 /* XGBE module 2 */
710 {XGBE_STATS2_INFO(rx_good_frames)},
711 {XGBE_STATS2_INFO(rx_broadcast_frames)},
712 {XGBE_STATS2_INFO(rx_multicast_frames)},
713 {XGBE_STATS2_INFO(rx_pause_frames)},
714 {XGBE_STATS2_INFO(rx_crc_errors)},
715 {XGBE_STATS2_INFO(rx_align_code_errors)},
716 {XGBE_STATS2_INFO(rx_oversized_frames)},
717 {XGBE_STATS2_INFO(rx_jabber_frames)},
718 {XGBE_STATS2_INFO(rx_undersized_frames)},
719 {XGBE_STATS2_INFO(rx_fragments)},
720 {XGBE_STATS2_INFO(overrun_type4)},
721 {XGBE_STATS2_INFO(overrun_type5)},
722 {XGBE_STATS2_INFO(rx_bytes)},
723 {XGBE_STATS2_INFO(tx_good_frames)},
724 {XGBE_STATS2_INFO(tx_broadcast_frames)},
725 {XGBE_STATS2_INFO(tx_multicast_frames)},
726 {XGBE_STATS2_INFO(tx_pause_frames)},
727 {XGBE_STATS2_INFO(tx_deferred_frames)},
728 {XGBE_STATS2_INFO(tx_collision_frames)},
729 {XGBE_STATS2_INFO(tx_single_coll_frames)},
730 {XGBE_STATS2_INFO(tx_mult_coll_frames)},
731 {XGBE_STATS2_INFO(tx_excessive_collisions)},
732 {XGBE_STATS2_INFO(tx_late_collisions)},
733 {XGBE_STATS2_INFO(tx_underrun)},
734 {XGBE_STATS2_INFO(tx_carrier_sense_errors)},
735 {XGBE_STATS2_INFO(tx_bytes)},
736 {XGBE_STATS2_INFO(tx_64byte_frames)},
737 {XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
738 {XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
739 {XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
740 {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
741 {XGBE_STATS2_INFO(tx_1024byte_frames)},
742 {XGBE_STATS2_INFO(net_bytes)},
743 {XGBE_STATS2_INFO(rx_sof_overruns)},
744 {XGBE_STATS2_INFO(rx_mof_overruns)},
745 {XGBE_STATS2_INFO(rx_dma_overruns)},
746};
747
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500748#define for_each_intf(i, priv) \
749 list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
750
751#define for_each_sec_slave(slave, priv) \
752 list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
753
754#define first_sec_slave(priv) \
755 list_first_entry(&priv->secondary_slaves, \
756 struct gbe_slave, slave_list)
757
758static void keystone_get_drvinfo(struct net_device *ndev,
759 struct ethtool_drvinfo *info)
760{
761 strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
762 strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
763}
764
765static u32 keystone_get_msglevel(struct net_device *ndev)
766{
767 struct netcp_intf *netcp = netdev_priv(ndev);
768
769 return netcp->msg_enable;
770}
771
772static void keystone_set_msglevel(struct net_device *ndev, u32 value)
773{
774 struct netcp_intf *netcp = netdev_priv(ndev);
775
776 netcp->msg_enable = value;
777}
778
779static void keystone_get_stat_strings(struct net_device *ndev,
780 uint32_t stringset, uint8_t *data)
781{
782 struct netcp_intf *netcp = netdev_priv(ndev);
783 struct gbe_intf *gbe_intf;
784 struct gbe_priv *gbe_dev;
785 int i;
786
787 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
788 if (!gbe_intf)
789 return;
790 gbe_dev = gbe_intf->gbe_dev;
791
792 switch (stringset) {
793 case ETH_SS_STATS:
794 for (i = 0; i < gbe_dev->num_et_stats; i++) {
795 memcpy(data, gbe_dev->et_stats[i].desc,
796 ETH_GSTRING_LEN);
797 data += ETH_GSTRING_LEN;
798 }
799 break;
800 case ETH_SS_TEST:
801 break;
802 }
803}
804
805static int keystone_get_sset_count(struct net_device *ndev, int stringset)
806{
807 struct netcp_intf *netcp = netdev_priv(ndev);
808 struct gbe_intf *gbe_intf;
809 struct gbe_priv *gbe_dev;
810
811 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
812 if (!gbe_intf)
813 return -EINVAL;
814 gbe_dev = gbe_intf->gbe_dev;
815
816 switch (stringset) {
817 case ETH_SS_TEST:
818 return 0;
819 case ETH_SS_STATS:
820 return gbe_dev->num_et_stats;
821 default:
822 return -EINVAL;
823 }
824}
825
826static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
827{
828 void __iomem *base = NULL;
829 u32 __iomem *p;
830 u32 tmp = 0;
831 int i;
832
833 for (i = 0; i < gbe_dev->num_et_stats; i++) {
834 base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
835 p = base + gbe_dev->et_stats[i].offset;
836 tmp = readl(p);
837 gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
838 if (data)
839 data[i] = gbe_dev->hw_stats[i];
840 /* write-to-decrement:
841 * new register value = old register value - write value
842 */
843 writel(tmp, p);
844 }
845}
846
847static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
848{
849 void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
850 void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
851 u64 *hw_stats = &gbe_dev->hw_stats[0];
852 void __iomem *base = NULL;
853 u32 __iomem *p;
854 u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
855 int i, j, pair;
856
857 for (pair = 0; pair < 2; pair++) {
858 val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
859
860 if (pair == 0)
861 val &= ~GBE_STATS_CD_SEL;
862 else
863 val |= GBE_STATS_CD_SEL;
864
865 /* make the stat modules visible */
866 writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
867
868 for (i = 0; i < pair_size; i++) {
869 j = pair * pair_size + i;
870 switch (gbe_dev->et_stats[j].type) {
871 case GBE_STATSA_MODULE:
872 case GBE_STATSC_MODULE:
873 base = gbe_statsa;
874 break;
875 case GBE_STATSB_MODULE:
876 case GBE_STATSD_MODULE:
877 base = gbe_statsb;
878 break;
879 }
880
881 p = base + gbe_dev->et_stats[j].offset;
882 tmp = readl(p);
883 hw_stats[j] += tmp;
884 if (data)
885 data[j] = hw_stats[j];
886 /* write-to-decrement:
887 * new register value = old register value - write value
888 */
889 writel(tmp, p);
890 }
891 }
892}
893
894static void keystone_get_ethtool_stats(struct net_device *ndev,
895 struct ethtool_stats *stats,
896 uint64_t *data)
897{
898 struct netcp_intf *netcp = netdev_priv(ndev);
899 struct gbe_intf *gbe_intf;
900 struct gbe_priv *gbe_dev;
901
902 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
903 if (!gbe_intf)
904 return;
905
906 gbe_dev = gbe_intf->gbe_dev;
907 spin_lock_bh(&gbe_dev->hw_stats_lock);
Wingman Kwok90cff9e2015-01-15 19:12:52 -0500908 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
909 gbe_update_stats_ver14(gbe_dev, data);
910 else
911 gbe_update_stats(gbe_dev, data);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -0500912 spin_unlock_bh(&gbe_dev->hw_stats_lock);
913}
914
915static int keystone_get_settings(struct net_device *ndev,
916 struct ethtool_cmd *cmd)
917{
918 struct netcp_intf *netcp = netdev_priv(ndev);
919 struct phy_device *phy = ndev->phydev;
920 struct gbe_intf *gbe_intf;
921 int ret;
922
923 if (!phy)
924 return -EINVAL;
925
926 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
927 if (!gbe_intf)
928 return -EINVAL;
929
930 if (!gbe_intf->slave)
931 return -EINVAL;
932
933 ret = phy_ethtool_gset(phy, cmd);
934 if (!ret)
935 cmd->port = gbe_intf->slave->phy_port_t;
936
937 return ret;
938}
939
940static int keystone_set_settings(struct net_device *ndev,
941 struct ethtool_cmd *cmd)
942{
943 struct netcp_intf *netcp = netdev_priv(ndev);
944 struct phy_device *phy = ndev->phydev;
945 struct gbe_intf *gbe_intf;
946 u32 features = cmd->advertising & cmd->supported;
947
948 if (!phy)
949 return -EINVAL;
950
951 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
952 if (!gbe_intf)
953 return -EINVAL;
954
955 if (!gbe_intf->slave)
956 return -EINVAL;
957
958 if (cmd->port != gbe_intf->slave->phy_port_t) {
959 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
960 return -EINVAL;
961
962 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
963 return -EINVAL;
964
965 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
966 return -EINVAL;
967
968 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
969 return -EINVAL;
970
971 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
972 return -EINVAL;
973 }
974
975 gbe_intf->slave->phy_port_t = cmd->port;
976 return phy_ethtool_sset(phy, cmd);
977}
978
979static const struct ethtool_ops keystone_ethtool_ops = {
980 .get_drvinfo = keystone_get_drvinfo,
981 .get_link = ethtool_op_get_link,
982 .get_msglevel = keystone_get_msglevel,
983 .set_msglevel = keystone_set_msglevel,
984 .get_strings = keystone_get_stat_strings,
985 .get_sset_count = keystone_get_sset_count,
986 .get_ethtool_stats = keystone_get_ethtool_stats,
987 .get_settings = keystone_get_settings,
988 .set_settings = keystone_set_settings,
989};
990
991#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
992 ((mac)[2] << 16) | ((mac)[3] << 24))
993#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
994
995static void gbe_set_slave_mac(struct gbe_slave *slave,
996 struct gbe_intf *gbe_intf)
997{
998 struct net_device *ndev = gbe_intf->ndev;
999
1000 writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1001 writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1002}
1003
1004static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1005{
1006 if (priv->host_port == 0)
1007 return slave_num + 1;
1008
1009 return slave_num;
1010}
1011
1012static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1013 struct net_device *ndev,
1014 struct gbe_slave *slave,
1015 int up)
1016{
1017 struct phy_device *phy = slave->phy;
1018 u32 mac_control = 0;
1019
1020 if (up) {
1021 mac_control = slave->mac_control;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001022 if (phy && (phy->speed == SPEED_1000)) {
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001023 mac_control |= MACSL_GIG_MODE;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001024 mac_control &= ~MACSL_XGIG_MODE;
1025 } else if (phy && (phy->speed == SPEED_10000)) {
1026 mac_control |= MACSL_XGIG_MODE;
1027 mac_control &= ~MACSL_GIG_MODE;
1028 }
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001029
1030 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1031 mac_control));
1032
1033 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1034 ALE_PORT_STATE,
1035 ALE_PORT_STATE_FORWARD);
1036
1037 if (ndev && slave->open)
1038 netif_carrier_on(ndev);
1039 } else {
1040 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1041 mac_control));
1042 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1043 ALE_PORT_STATE,
1044 ALE_PORT_STATE_DISABLE);
1045 if (ndev)
1046 netif_carrier_off(ndev);
1047 }
1048
1049 if (phy)
1050 phy_print_status(phy);
1051}
1052
1053static bool gbe_phy_link_status(struct gbe_slave *slave)
1054{
1055 return !slave->phy || slave->phy->link;
1056}
1057
1058static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1059 struct gbe_slave *slave,
1060 struct net_device *ndev)
1061{
1062 int sp = slave->slave_num;
1063 int phy_link_state, sgmii_link_state = 1, link_state;
1064
1065 if (!slave->open)
1066 return;
1067
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001068 if (!SLAVE_LINK_IS_XGMII(slave))
1069 sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
1070 sp);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001071 phy_link_state = gbe_phy_link_status(slave);
1072 link_state = phy_link_state & sgmii_link_state;
1073
1074 if (atomic_xchg(&slave->link_state, link_state) != link_state)
1075 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
1076 link_state);
1077}
1078
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001079static void xgbe_adjust_link(struct net_device *ndev)
1080{
1081 struct netcp_intf *netcp = netdev_priv(ndev);
1082 struct gbe_intf *gbe_intf;
1083
1084 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1085 if (!gbe_intf)
1086 return;
1087
1088 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1089 ndev);
1090}
1091
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001092static void gbe_adjust_link(struct net_device *ndev)
1093{
1094 struct netcp_intf *netcp = netdev_priv(ndev);
1095 struct gbe_intf *gbe_intf;
1096
1097 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1098 if (!gbe_intf)
1099 return;
1100
1101 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1102 ndev);
1103}
1104
1105static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
1106{
1107 struct gbe_priv *gbe_dev = netdev_priv(ndev);
1108 struct gbe_slave *slave;
1109
1110 for_each_sec_slave(slave, gbe_dev)
1111 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1112}
1113
1114/* Reset EMAC
1115 * Soft reset is set and polled until clear, or until a timeout occurs
1116 */
1117static int gbe_port_reset(struct gbe_slave *slave)
1118{
1119 u32 i, v;
1120
1121 /* Set the soft reset bit */
1122 writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
1123
1124 /* Wait for the bit to clear */
1125 for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
1126 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
1127 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
1128 return 0;
1129 }
1130
1131 /* Timeout on the reset */
1132 return GMACSL_RET_WARN_RESET_INCOMPLETE;
1133}
1134
1135/* Configure EMAC */
1136static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1137 int max_rx_len)
1138{
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001139 u32 xgmii_mode;
1140
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001141 if (max_rx_len > NETCP_MAX_FRAME_SIZE)
1142 max_rx_len = NETCP_MAX_FRAME_SIZE;
1143
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001144 /* Enable correct MII mode at SS level */
1145 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
1146 (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
1147 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
1148 xgmii_mode |= (1 << slave->slave_num);
1149 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
1150 }
1151
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001152 writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
1153 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1154}
1155
1156static void gbe_slave_stop(struct gbe_intf *intf)
1157{
1158 struct gbe_priv *gbe_dev = intf->gbe_dev;
1159 struct gbe_slave *slave = intf->slave;
1160
1161 gbe_port_reset(slave);
1162 /* Disable forwarding */
1163 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1164 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1165 cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
1166 1 << slave->port_num, 0, 0);
1167
1168 if (!slave->phy)
1169 return;
1170
1171 phy_stop(slave->phy);
1172 phy_disconnect(slave->phy);
1173 slave->phy = NULL;
1174}
1175
1176static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
1177{
1178 void __iomem *sgmii_port_regs;
1179
1180 sgmii_port_regs = priv->sgmii_port_regs;
1181 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1182 sgmii_port_regs = priv->sgmii_port34_regs;
1183
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001184 if (!SLAVE_LINK_IS_XGMII(slave)) {
1185 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
1186 netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
1187 slave->link_interface);
1188 }
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001189}
1190
1191static int gbe_slave_open(struct gbe_intf *gbe_intf)
1192{
1193 struct gbe_priv *priv = gbe_intf->gbe_dev;
1194 struct gbe_slave *slave = gbe_intf->slave;
1195 phy_interface_t phy_mode;
1196 bool has_phy = false;
1197
1198 void (*hndlr)(struct net_device *) = gbe_adjust_link;
1199
1200 gbe_sgmii_config(priv, slave);
1201 gbe_port_reset(slave);
1202 gbe_port_config(priv, slave, priv->rx_packet_max);
1203 gbe_set_slave_mac(slave, gbe_intf);
1204 /* enable forwarding */
1205 cpsw_ale_control_set(priv->ale, slave->port_num,
1206 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1207 cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
1208 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
1209
1210 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1211 has_phy = true;
1212 phy_mode = PHY_INTERFACE_MODE_SGMII;
1213 slave->phy_port_t = PORT_MII;
1214 } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
1215 has_phy = true;
1216 phy_mode = PHY_INTERFACE_MODE_NA;
1217 slave->phy_port_t = PORT_FIBRE;
1218 }
1219
1220 if (has_phy) {
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001221 if (priv->ss_version == XGBE_SS_VERSION_10)
1222 hndlr = xgbe_adjust_link;
1223
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001224 slave->phy = of_phy_connect(gbe_intf->ndev,
1225 slave->phy_node,
1226 hndlr, 0,
1227 phy_mode);
1228 if (!slave->phy) {
1229 dev_err(priv->dev, "phy not found on slave %d\n",
1230 slave->slave_num);
1231 return -ENODEV;
1232 }
1233 dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
1234 dev_name(&slave->phy->dev));
1235 phy_start(slave->phy);
1236 phy_read_status(slave->phy);
1237 }
1238 return 0;
1239}
1240
1241static void gbe_init_host_port(struct gbe_priv *priv)
1242{
1243 int bypass_en = 1;
1244 /* Max length register */
1245 writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
1246 rx_maxlen));
1247
1248 cpsw_ale_start(priv->ale);
1249
1250 if (priv->enable_ale)
1251 bypass_en = 0;
1252
1253 cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
1254
1255 cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
1256
1257 cpsw_ale_control_set(priv->ale, priv->host_port,
1258 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1259
1260 cpsw_ale_control_set(priv->ale, 0,
1261 ALE_PORT_UNKNOWN_VLAN_MEMBER,
1262 GBE_PORT_MASK(priv->ale_ports));
1263
1264 cpsw_ale_control_set(priv->ale, 0,
1265 ALE_PORT_UNKNOWN_MCAST_FLOOD,
1266 GBE_PORT_MASK(priv->ale_ports - 1));
1267
1268 cpsw_ale_control_set(priv->ale, 0,
1269 ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
1270 GBE_PORT_MASK(priv->ale_ports));
1271
1272 cpsw_ale_control_set(priv->ale, 0,
1273 ALE_PORT_UNTAGGED_EGRESS,
1274 GBE_PORT_MASK(priv->ale_ports));
1275}
1276
1277static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1278{
1279 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1280 u16 vlan_id;
1281
1282 cpsw_ale_add_mcast(gbe_dev->ale, addr,
1283 GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
1284 ALE_MCAST_FWD_2);
1285 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
1286 cpsw_ale_add_mcast(gbe_dev->ale, addr,
1287 GBE_PORT_MASK(gbe_dev->ale_ports),
1288 ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
1289 }
1290}
1291
1292static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1293{
1294 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1295 u16 vlan_id;
1296
1297 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
1298
1299 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
1300 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
1301 ALE_VLAN, vlan_id);
1302}
1303
1304static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1305{
1306 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1307 u16 vlan_id;
1308
1309 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
1310
1311 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
1312 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
1313 }
1314}
1315
1316static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1317{
1318 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1319 u16 vlan_id;
1320
1321 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
1322
1323 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
1324 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
1325 ALE_VLAN, vlan_id);
1326 }
1327}
1328
1329static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
1330{
1331 struct gbe_intf *gbe_intf = intf_priv;
1332 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1333
1334 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
1335 naddr->addr, naddr->type);
1336
1337 switch (naddr->type) {
1338 case ADDR_MCAST:
1339 case ADDR_BCAST:
1340 gbe_add_mcast_addr(gbe_intf, naddr->addr);
1341 break;
1342 case ADDR_UCAST:
1343 case ADDR_DEV:
1344 gbe_add_ucast_addr(gbe_intf, naddr->addr);
1345 break;
1346 case ADDR_ANY:
1347 /* nothing to do for promiscuous */
1348 default:
1349 break;
1350 }
1351
1352 return 0;
1353}
1354
1355static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
1356{
1357 struct gbe_intf *gbe_intf = intf_priv;
1358 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1359
1360 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
1361 naddr->addr, naddr->type);
1362
1363 switch (naddr->type) {
1364 case ADDR_MCAST:
1365 case ADDR_BCAST:
1366 gbe_del_mcast_addr(gbe_intf, naddr->addr);
1367 break;
1368 case ADDR_UCAST:
1369 case ADDR_DEV:
1370 gbe_del_ucast_addr(gbe_intf, naddr->addr);
1371 break;
1372 case ADDR_ANY:
1373 /* nothing to do for promiscuous */
1374 default:
1375 break;
1376 }
1377
1378 return 0;
1379}
1380
1381static int gbe_add_vid(void *intf_priv, int vid)
1382{
1383 struct gbe_intf *gbe_intf = intf_priv;
1384 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1385
1386 set_bit(vid, gbe_intf->active_vlans);
1387
1388 cpsw_ale_add_vlan(gbe_dev->ale, vid,
1389 GBE_PORT_MASK(gbe_dev->ale_ports),
1390 GBE_MASK_NO_PORTS,
1391 GBE_PORT_MASK(gbe_dev->ale_ports),
1392 GBE_PORT_MASK(gbe_dev->ale_ports - 1));
1393
1394 return 0;
1395}
1396
1397static int gbe_del_vid(void *intf_priv, int vid)
1398{
1399 struct gbe_intf *gbe_intf = intf_priv;
1400 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1401
1402 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
1403 clear_bit(vid, gbe_intf->active_vlans);
1404 return 0;
1405}
1406
1407static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
1408{
1409 struct gbe_intf *gbe_intf = intf_priv;
1410 struct phy_device *phy = gbe_intf->slave->phy;
1411 int ret = -EOPNOTSUPP;
1412
1413 if (phy)
1414 ret = phy_mii_ioctl(phy, req, cmd);
1415
1416 return ret;
1417}
1418
1419static void netcp_ethss_timer(unsigned long arg)
1420{
1421 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
1422 struct gbe_intf *gbe_intf;
1423 struct gbe_slave *slave;
1424
1425 /* Check & update SGMII link state of interfaces */
1426 for_each_intf(gbe_intf, gbe_dev) {
1427 if (!gbe_intf->slave->open)
1428 continue;
1429 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
1430 gbe_intf->ndev);
1431 }
1432
1433 /* Check & update SGMII link state of secondary ports */
1434 for_each_sec_slave(slave, gbe_dev) {
1435 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1436 }
1437
1438 spin_lock_bh(&gbe_dev->hw_stats_lock);
1439
1440 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1441 gbe_update_stats_ver14(gbe_dev, NULL);
1442 else
1443 gbe_update_stats(gbe_dev, NULL);
1444
1445 spin_unlock_bh(&gbe_dev->hw_stats_lock);
1446
1447 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
1448 add_timer(&gbe_dev->timer);
1449}
1450
1451static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
1452{
1453 struct gbe_intf *gbe_intf = data;
1454
1455 p_info->tx_pipe = &gbe_intf->tx_pipe;
1456 return 0;
1457}
1458
1459static int gbe_open(void *intf_priv, struct net_device *ndev)
1460{
1461 struct gbe_intf *gbe_intf = intf_priv;
1462 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1463 struct netcp_intf *netcp = netdev_priv(ndev);
1464 struct gbe_slave *slave = gbe_intf->slave;
1465 int port_num = slave->port_num;
1466 u32 reg;
1467 int ret;
1468
1469 reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
1470 dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
1471 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
1472 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
1473
1474 if (gbe_dev->enable_ale)
1475 gbe_intf->tx_pipe.dma_psflags = 0;
1476 else
1477 gbe_intf->tx_pipe.dma_psflags = port_num;
1478
1479 dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
1480 gbe_intf->tx_pipe.dma_chan_name,
1481 gbe_intf->tx_pipe.dma_channel,
1482 gbe_intf->tx_pipe.dma_psflags);
1483
1484 gbe_slave_stop(gbe_intf);
1485
1486 /* disable priority elevation and enable statistics on all ports */
1487 writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
1488
1489 /* Control register */
1490 writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
1491
1492 /* All statistics enabled and STAT AB visible by default */
1493 writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
1494 stat_port_en));
1495
1496 ret = gbe_slave_open(gbe_intf);
1497 if (ret)
1498 goto fail;
1499
1500 netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
1501 gbe_intf);
1502
1503 slave->open = true;
1504 netcp_ethss_update_link_state(gbe_dev, slave, ndev);
1505 return 0;
1506
1507fail:
1508 gbe_slave_stop(gbe_intf);
1509 return ret;
1510}
1511
1512static int gbe_close(void *intf_priv, struct net_device *ndev)
1513{
1514 struct gbe_intf *gbe_intf = intf_priv;
1515 struct netcp_intf *netcp = netdev_priv(ndev);
1516
1517 gbe_slave_stop(gbe_intf);
1518 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
1519 gbe_intf);
1520
1521 gbe_intf->slave->open = false;
1522 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
1523 return 0;
1524}
1525
1526static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1527 struct device_node *node)
1528{
1529 int port_reg_num;
1530 u32 port_reg_ofs, emac_reg_ofs;
1531
1532 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
1533 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
1534 return -EINVAL;
1535 }
1536
1537 if (of_property_read_u32(node, "link-interface",
1538 &slave->link_interface)) {
1539 dev_warn(gbe_dev->dev,
1540 "missing link-interface value defaulting to 1G mac-phy link\n");
1541 slave->link_interface = SGMII_LINK_MAC_PHY;
1542 }
1543
1544 slave->open = false;
1545 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
1546 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
1547
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001548 if (slave->link_interface >= XGMII_LINK_MAC_PHY)
1549 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
1550 else
1551 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001552
1553 /* Emac regs memmap are contiguous but port regs are not */
1554 port_reg_num = slave->slave_num;
1555 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
1556 if (slave->slave_num > 1) {
1557 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
1558 port_reg_num -= 2;
1559 } else {
1560 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
1561 }
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001562 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
1563 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001564 } else {
1565 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
1566 gbe_dev->ss_version);
1567 return -EINVAL;
1568 }
1569
1570 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1571 emac_reg_ofs = GBE13_EMAC_OFFSET;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001572 else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
1573 emac_reg_ofs = XGBE10_EMAC_OFFSET;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001574
1575 slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
1576 (0x30 * port_reg_num);
1577 slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
1578 (0x40 * slave->slave_num);
1579
1580 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
1581 /* Initialize slave port register offsets */
1582 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
1583 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
1584 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
1585 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
1586 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
1587 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
1588 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
1589 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
1590 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
1591
1592 /* Initialize EMAC register offsets */
1593 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
1594 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
1595 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
1596
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001597 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
1598 /* Initialize slave port register offsets */
1599 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
1600 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
1601 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
1602 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
1603 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
1604 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
1605 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
1606 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
1607 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
1608
1609 /* Initialize EMAC register offsets */
1610 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
1611 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
1612 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001613 }
1614
1615 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
1616 return 0;
1617}
1618
1619static void init_secondary_ports(struct gbe_priv *gbe_dev,
1620 struct device_node *node)
1621{
1622 struct device *dev = gbe_dev->dev;
1623 phy_interface_t phy_mode;
1624 struct gbe_priv **priv;
1625 struct device_node *port;
1626 struct gbe_slave *slave;
1627 bool mac_phy_link = false;
1628
1629 for_each_child_of_node(node, port) {
1630 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
1631 if (!slave) {
1632 dev_err(dev,
1633 "memomry alloc failed for secondary port(%s), skipping...\n",
1634 port->name);
1635 continue;
1636 }
1637
1638 if (init_slave(gbe_dev, slave, port)) {
1639 dev_err(dev,
1640 "Failed to initialize secondary port(%s), skipping...\n",
1641 port->name);
1642 devm_kfree(dev, slave);
1643 continue;
1644 }
1645
1646 gbe_sgmii_config(gbe_dev, slave);
1647 gbe_port_reset(slave);
1648 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
1649 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
1650 gbe_dev->num_slaves++;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001651 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
1652 (slave->link_interface == XGMII_LINK_MAC_PHY))
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001653 mac_phy_link = true;
1654
1655 slave->open = true;
1656 }
1657
1658 /* of_phy_connect() is needed only for MAC-PHY interface */
1659 if (!mac_phy_link)
1660 return;
1661
1662 /* Allocate dummy netdev device for attaching to phy device */
1663 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
1664 NET_NAME_UNKNOWN, ether_setup);
1665 if (!gbe_dev->dummy_ndev) {
1666 dev_err(dev,
1667 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
1668 return;
1669 }
1670 priv = netdev_priv(gbe_dev->dummy_ndev);
1671 *priv = gbe_dev;
1672
1673 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1674 phy_mode = PHY_INTERFACE_MODE_SGMII;
1675 slave->phy_port_t = PORT_MII;
1676 } else {
1677 phy_mode = PHY_INTERFACE_MODE_NA;
1678 slave->phy_port_t = PORT_FIBRE;
1679 }
1680
1681 for_each_sec_slave(slave, gbe_dev) {
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001682 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
1683 (slave->link_interface != XGMII_LINK_MAC_PHY))
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001684 continue;
1685 slave->phy =
1686 of_phy_connect(gbe_dev->dummy_ndev,
1687 slave->phy_node,
1688 gbe_adjust_link_sec_slaves,
1689 0, phy_mode);
1690 if (!slave->phy) {
1691 dev_err(dev, "phy not found for slave %d\n",
1692 slave->slave_num);
1693 slave->phy = NULL;
1694 } else {
1695 dev_dbg(dev, "phy found: id is: 0x%s\n",
1696 dev_name(&slave->phy->dev));
1697 phy_start(slave->phy);
1698 phy_read_status(slave->phy);
1699 }
1700 }
1701}
1702
1703static void free_secondary_ports(struct gbe_priv *gbe_dev)
1704{
1705 struct gbe_slave *slave;
1706
1707 for (;;) {
1708 slave = first_sec_slave(gbe_dev);
1709 if (!slave)
1710 break;
1711 if (slave->phy)
1712 phy_disconnect(slave->phy);
1713 list_del(&slave->slave_list);
1714 }
1715 if (gbe_dev->dummy_ndev)
1716 free_netdev(gbe_dev->dummy_ndev);
1717}
1718
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 XGBE10_NUM_STAT_ENTRIES *
					 (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

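/* Map the GBE subsystem registers from the DT node and read the
 * subsystem version from the first register of the mapped range.
 */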
static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}

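/* Fill in register offsets, statistics storage, ethtool stats table and
 * ALE parameters for the 1G (GBE 1.4) subsystem, using the ss_regs
 * mapping set up by get_gbe_resource_version().
 */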
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	void __iomem *regs;
	int i;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 GBE13_NUM_HW_STAT_ENTRIES *
					 GBE13_NUM_SLAVES * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	regs = gbe_dev->ss_regs;
	gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
	gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
	gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;

	for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

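/* Instance probe: parse the "gbe"/"xgbe" DT node, map and initialize the
 * selected subsystem, set up the TX pipe, count the slave interfaces,
 * bring up any secondary slave ports, create the ALE engine, initialize
 * the host port and start the housekeeping timer.
 */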
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces = NULL, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-queue\" parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			goto quit;

		ret = set_gbe_ethss14_priv(gbe_dev, node);
		if (ret)
			goto quit;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			goto quit;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
		if (ret)
			goto quit;
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
		goto quit;
	}

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		goto quit;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		goto quit;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
	}

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports)
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev, "No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto quit;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto quit;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	init_timer(&gbe_dev->timer);
	gbe_dev->timer.data = (unsigned long)gbe_dev;
	gbe_dev->timer.function = netcp_ethss_timer;
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

quit:
	if (gbe_dev->hw_stats)
		devm_kfree(dev, gbe_dev->hw_stats);
	if (gbe_dev->ale)
		cpsw_ale_destroy(gbe_dev->ale);
	if (gbe_dev->ss_regs)
		devm_iounmap(dev, gbe_dev->ss_regs);
	if (interfaces)
		of_node_put(interfaces);
	devm_kfree(dev, gbe_dev);
	return ret;
}

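/* Attach a netcp interface: allocate per-interface state, initialize the
 * slave from its DT node, hook up ethtool ops on the net_device and add
 * the interface to the instance list.
 */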
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

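/* Undo gbe_attach() for one interface. */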
static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

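/* Instance teardown: stop the timer, destroy the ALE engine, close the
 * TX pipe, free the secondary ports and release per-instance resources.
 */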
static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct device *dev = gbe_dev->dev;

	del_timer_sync(&gbe_dev->timer);
	cpsw_ale_stop(gbe_dev->ale);
	cpsw_ale_destroy(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(dev, "unreleased ethss interfaces present\n");

	devm_kfree(dev, gbe_dev->hw_stats);
	devm_iounmap(dev, gbe_dev->ss_regs);
	/* keep a local dev pointer: the memset below clears gbe_dev->dev
	 * before the final devm_kfree() needs it
	 */
	memset(gbe_dev, 0x00, sizeof(*gbe_dev));
	devm_kfree(dev, gbe_dev);
	return 0;
}

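/* netcp module callbacks for the 1G (GBE) subsystem */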
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

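/* netcp module callbacks for the 10G (XGBE) subsystem; the handlers are
 * shared with gbe_module and tell the two subsystems apart at probe time
 * by the DT node name ("gbe" vs "xgbe").
 */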
static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

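/* Register both the GBE and XGBE modules with the netcp core at load. */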
static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret)
		return ret;

	return 0;
}
module_init(keystone_gbe_init);

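/* Unregister both modules on exit. */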
static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);