blob: 921a36fd139da46e1a0600d8a7acd56326f79cf4 [file] [log] [blame]
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/list.h>
16#include <linux/slab.h>
17#include <linux/rtnetlink.h>
18#include <net/dsa.h>
19#include <linux/of.h>
20#include <linux/of_net.h>
21#include "dsa_priv.h"
22
/* All switch trees known to DSA; walked by dsa_get_dst() and modified
 * under dsa2_mutex via the register/unregister paths.
 */
static LIST_HEAD(dsa_switch_trees);

/* Serializes switch registration and unregistration. */
static DEFINE_MUTEX(dsa2_mutex);
25
26static struct dsa_switch_tree *dsa_get_dst(u32 tree)
27{
28 struct dsa_switch_tree *dst;
29
30 list_for_each_entry(dst, &dsa_switch_trees, list)
31 if (dst->tree == tree)
32 return dst;
33 return NULL;
34}
35
36static void dsa_free_dst(struct kref *ref)
37{
38 struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
39 refcount);
40
41 list_del(&dst->list);
42 kfree(dst);
43}
44
45static void dsa_put_dst(struct dsa_switch_tree *dst)
46{
47 kref_put(&dst->refcount, dsa_free_dst);
48}
49
50static struct dsa_switch_tree *dsa_add_dst(u32 tree)
51{
52 struct dsa_switch_tree *dst;
53
54 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
55 if (!dst)
56 return NULL;
57 dst->tree = tree;
58 dst->cpu_switch = -1;
59 INIT_LIST_HEAD(&dst->list);
60 list_add_tail(&dsa_switch_trees, &dst->list);
61 kref_init(&dst->refcount);
62
63 return dst;
64}
65
66static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
67 struct dsa_switch *ds, u32 index)
68{
69 kref_get(&dst->refcount);
70 dst->ds[index] = ds;
71}
72
73static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
74 struct dsa_switch *ds, u32 index)
75{
76 dst->ds[index] = NULL;
77 kref_put(&dst->refcount, dsa_free_dst);
78}
79
80static bool dsa_port_is_dsa(struct device_node *port)
81{
82 const char *name;
83
84 name = of_get_property(port, "label", NULL);
85 if (!name)
86 return false;
87
88 if (!strcmp(name, "dsa"))
89 return true;
90
91 return false;
92}
93
94static bool dsa_port_is_cpu(struct device_node *port)
95{
96 const char *name;
97
98 name = of_get_property(port, "label", NULL);
99 if (!name)
100 return false;
101
102 if (!strcmp(name, "cpu"))
103 return true;
104
105 return false;
106}
107
108static bool dsa_ds_find_port(struct dsa_switch *ds,
109 struct device_node *port)
110{
111 u32 index;
112
113 for (index = 0; index < DSA_MAX_PORTS; index++)
114 if (ds->ports[index].dn == port)
115 return true;
116 return false;
117}
118
119static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
120 struct device_node *port)
121{
122 struct dsa_switch *ds;
123 u32 index;
124
125 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
126 ds = dst->ds[index];
127 if (!ds)
128 continue;
129
130 if (dsa_ds_find_port(ds, port))
131 return ds;
132 }
133
134 return NULL;
135}
136
137static int dsa_port_complete(struct dsa_switch_tree *dst,
138 struct dsa_switch *src_ds,
139 struct device_node *port,
140 u32 src_port)
141{
142 struct device_node *link;
143 int index;
144 struct dsa_switch *dst_ds;
145
146 for (index = 0;; index++) {
147 link = of_parse_phandle(port, "link", index);
148 if (!link)
149 break;
150
151 dst_ds = dsa_dst_find_port(dst, link);
152 of_node_put(link);
153
154 if (!dst_ds)
155 return 1;
156
157 src_ds->rtable[dst_ds->index] = src_port;
158 }
159
160 return 0;
161}
162
163/* A switch is complete if all the DSA ports phandles point to ports
164 * known in the tree. A return value of 1 means the tree is not
165 * complete. This is not an error condition. A value of 0 is
166 * success.
167 */
168static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
169{
170 struct device_node *port;
171 u32 index;
172 int err;
173
174 for (index = 0; index < DSA_MAX_PORTS; index++) {
175 port = ds->ports[index].dn;
176 if (!port)
177 continue;
178
179 if (!dsa_port_is_dsa(port))
180 continue;
181
182 err = dsa_port_complete(dst, ds, port, index);
183 if (err != 0)
184 return err;
185
186 ds->dsa_port_mask |= BIT(index);
187 }
188
189 return 0;
190}
191
192/* A tree is complete if all the DSA ports phandles point to ports
193 * known in the tree. A return value of 1 means the tree is not
194 * complete. This is not an error condition. A value of 0 is
195 * success.
196 */
197static int dsa_dst_complete(struct dsa_switch_tree *dst)
198{
199 struct dsa_switch *ds;
200 u32 index;
201 int err;
202
203 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
204 ds = dst->ds[index];
205 if (!ds)
206 continue;
207
208 err = dsa_ds_complete(dst, ds);
209 if (err != 0)
210 return err;
211 }
212
213 return 0;
214}
215
216static int dsa_dsa_port_apply(struct device_node *port, u32 index,
217 struct dsa_switch *ds)
218{
219 int err;
220
221 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
222 if (err) {
223 dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
224 index, err);
225 return err;
226 }
227
228 return 0;
229}
230
231static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
232 struct dsa_switch *ds)
233{
234 dsa_cpu_dsa_destroy(port);
235}
236
237static int dsa_cpu_port_apply(struct device_node *port, u32 index,
238 struct dsa_switch *ds)
239{
240 int err;
241
242 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
243 if (err) {
244 dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
245 index, err);
246 return err;
247 }
248
249 ds->cpu_port_mask |= BIT(index);
250
251 return 0;
252}
253
254static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
255 struct dsa_switch *ds)
256{
257 dsa_cpu_dsa_destroy(port);
258 ds->cpu_port_mask &= ~BIT(index);
259
260}
261
262static int dsa_user_port_apply(struct device_node *port, u32 index,
263 struct dsa_switch *ds)
264{
265 const char *name;
266 int err;
267
268 name = of_get_property(port, "label", NULL);
269
270 err = dsa_slave_create(ds, ds->dev, index, name);
271 if (err) {
272 dev_warn(ds->dev, "Failed to create slave %d: %d\n",
273 index, err);
274 return err;
275 }
276
277 return 0;
278}
279
280static void dsa_user_port_unapply(struct device_node *port, u32 index,
281 struct dsa_switch *ds)
282{
283 if (ds->ports[index].netdev) {
284 dsa_slave_destroy(ds->ports[index].netdev);
285 ds->ports[index].netdev = NULL;
Florian Fainelli6e830d82016-06-07 16:32:39 -0700286 ds->enabled_port_mask &= ~(1 << index);
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200287 }
288}
289
290static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
291{
292 struct device_node *port;
293 u32 index;
294 int err;
295
Florian Fainelli6e830d82016-06-07 16:32:39 -0700296 /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
297 * driver and before drv->setup() has run, since the switch drivers and
298 * the slave MDIO bus driver rely on these values for probing PHY
299 * devices or not
300 */
301 ds->phys_mii_mask = ds->enabled_port_mask;
302
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200303 err = ds->drv->setup(ds);
304 if (err < 0)
305 return err;
306
307 err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr);
308 if (err < 0)
309 return err;
310
311 err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr);
312 if (err < 0)
313 return err;
314
315 for (index = 0; index < DSA_MAX_PORTS; index++) {
316 port = ds->ports[index].dn;
317 if (!port)
318 continue;
319
320 if (dsa_port_is_dsa(port)) {
321 err = dsa_dsa_port_apply(port, index, ds);
322 if (err)
323 return err;
324 continue;
325 }
326
327 if (dsa_port_is_cpu(port)) {
328 err = dsa_cpu_port_apply(port, index, ds);
329 if (err)
330 return err;
331 continue;
332 }
333
334 err = dsa_user_port_apply(port, index, ds);
335 if (err)
336 continue;
337 }
338
339 return 0;
340}
341
342static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
343{
344 struct device_node *port;
345 u32 index;
346
347 for (index = 0; index < DSA_MAX_PORTS; index++) {
348 port = ds->ports[index].dn;
349 if (!port)
350 continue;
351
352 if (dsa_port_is_dsa(port)) {
353 dsa_dsa_port_unapply(port, index, ds);
354 continue;
355 }
356
357 if (dsa_port_is_cpu(port)) {
358 dsa_cpu_port_unapply(port, index, ds);
359 continue;
360 }
361
362 dsa_user_port_unapply(port, index, ds);
363 }
364}
365
366static int dsa_dst_apply(struct dsa_switch_tree *dst)
367{
368 struct dsa_switch *ds;
369 u32 index;
370 int err;
371
372 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
373 ds = dst->ds[index];
374 if (!ds)
375 continue;
376
377 err = dsa_ds_apply(dst, ds);
378 if (err)
379 return err;
380 }
381
382 /* If we use a tagging format that doesn't have an ethertype
383 * field, make sure that all packets from this point on get
384 * sent to the tag format's receive function.
385 */
386 wmb();
387 dst->master_netdev->dsa_ptr = (void *)dst;
388 dst->applied = true;
389
390 return 0;
391}
392
393static void dsa_dst_unapply(struct dsa_switch_tree *dst)
394{
395 struct dsa_switch *ds;
396 u32 index;
397
398 if (!dst->applied)
399 return;
400
401 dst->master_netdev->dsa_ptr = NULL;
402
403 /* If we used a tagging format that doesn't have an ethertype
404 * field, make sure that all packets from this point get sent
405 * without the tag and go through the regular receive path.
406 */
407 wmb();
408
409 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
410 ds = dst->ds[index];
411 if (!ds)
412 continue;
413
414 dsa_ds_unapply(dst, ds);
415 }
416
417 pr_info("DSA: tree %d unapplied\n", dst->tree);
418 dst->applied = false;
419}
420
421static int dsa_cpu_parse(struct device_node *port, u32 index,
422 struct dsa_switch_tree *dst,
423 struct dsa_switch *ds)
424{
425 struct net_device *ethernet_dev;
426 struct device_node *ethernet;
427
428 ethernet = of_parse_phandle(port, "ethernet", 0);
429 if (!ethernet)
430 return -EINVAL;
431
432 ethernet_dev = of_find_net_device_by_node(ethernet);
433 if (!ethernet_dev)
434 return -EPROBE_DEFER;
435
436 if (!ds->master_netdev)
437 ds->master_netdev = ethernet_dev;
438
439 if (!dst->master_netdev)
440 dst->master_netdev = ethernet_dev;
441
442 if (dst->cpu_switch == -1) {
443 dst->cpu_switch = ds->index;
444 dst->cpu_port = index;
445 }
446
447 dst->tag_ops = dsa_resolve_tag_protocol(ds->drv->tag_protocol);
448 if (IS_ERR(dst->tag_ops)) {
449 dev_warn(ds->dev, "No tagger for this switch\n");
450 return PTR_ERR(dst->tag_ops);
451 }
452
453 dst->rcv = dst->tag_ops->rcv;
454
455 return 0;
456}
457
458static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
459{
460 struct device_node *port;
461 u32 index;
462 int err;
463
464 for (index = 0; index < DSA_MAX_PORTS; index++) {
465 port = ds->ports[index].dn;
466 if (!port)
467 continue;
468
469 if (dsa_port_is_cpu(port)) {
470 err = dsa_cpu_parse(port, index, dst, ds);
471 if (err)
472 return err;
473 }
474 }
475
476 pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
477
478 return 0;
479}
480
481static int dsa_dst_parse(struct dsa_switch_tree *dst)
482{
483 struct dsa_switch *ds;
484 u32 index;
485 int err;
486
487 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
488 ds = dst->ds[index];
489 if (!ds)
490 continue;
491
492 err = dsa_ds_parse(dst, ds);
493 if (err)
494 return err;
495 }
496
497 if (!dst->master_netdev) {
498 pr_warn("Tree has no master device\n");
499 return -EINVAL;
500 }
501
502 pr_info("DSA: tree %d parsed\n", dst->tree);
503
504 return 0;
505}
506
507static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
508{
509 struct device_node *port;
510 int err;
511 u32 reg;
512
513 for_each_available_child_of_node(ports, port) {
514 err = of_property_read_u32(port, "reg", &reg);
515 if (err)
516 return err;
517
518 if (reg >= DSA_MAX_PORTS)
519 return -EINVAL;
520
521 ds->ports[reg].dn = port;
Florian Fainelli6e830d82016-06-07 16:32:39 -0700522
523 /* Initialize enabled_port_mask now for drv->setup()
524 * to have access to a correct value, just like what
525 * net/dsa/dsa.c::dsa_switch_setup_one does.
526 */
527 if (!dsa_port_is_cpu(port))
528 ds->enabled_port_mask |= 1 << reg;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200529 }
530
531 return 0;
532}
533
534static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
535{
536 int err;
537
538 *tree = *index = 0;
539
540 err = of_property_read_u32_index(np, "dsa,member", 0, tree);
541 if (err) {
542 /* Does not exist, but it is optional */
543 if (err == -EINVAL)
544 return 0;
545 return err;
546 }
547
548 err = of_property_read_u32_index(np, "dsa,member", 1, index);
549 if (err)
550 return err;
551
552 if (*index >= DSA_MAX_SWITCHES)
553 return -EINVAL;
554
555 return 0;
556}
557
558static struct device_node *dsa_get_ports(struct dsa_switch *ds,
559 struct device_node *np)
560{
561 struct device_node *ports;
562
563 ports = of_get_child_by_name(np, "ports");
564 if (!ports) {
565 dev_err(ds->dev, "no ports child node found\n");
566 return ERR_PTR(-EINVAL);
567 }
568
569 return ports;
570}
571
572static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
573{
574 struct device_node *ports = dsa_get_ports(ds, np);
575 struct dsa_switch_tree *dst;
576 u32 tree, index;
577 int err;
578
579 err = dsa_parse_member(np, &tree, &index);
580 if (err)
581 return err;
582
583 if (IS_ERR(ports))
584 return PTR_ERR(ports);
585
586 err = dsa_parse_ports_dn(ports, ds);
587 if (err)
588 return err;
589
590 dst = dsa_get_dst(tree);
591 if (!dst) {
592 dst = dsa_add_dst(tree);
593 if (!dst)
594 return -ENOMEM;
595 }
596
597 if (dst->ds[index]) {
598 err = -EBUSY;
599 goto out;
600 }
601
602 ds->dst = dst;
603 ds->index = index;
604 dsa_dst_add_ds(dst, ds, index);
605
606 err = dsa_dst_complete(dst);
607 if (err < 0)
608 goto out_del_dst;
609
610 if (err == 1) {
611 /* Not all switches registered yet */
612 err = 0;
613 goto out;
614 }
615
616 if (dst->applied) {
617 pr_info("DSA: Disjoint trees?\n");
618 return -EINVAL;
619 }
620
621 err = dsa_dst_parse(dst);
622 if (err)
623 goto out_del_dst;
624
625 err = dsa_dst_apply(dst);
626 if (err) {
627 dsa_dst_unapply(dst);
628 goto out_del_dst;
629 }
630
631 dsa_put_dst(dst);
632 return 0;
633
634out_del_dst:
635 dsa_dst_del_ds(dst, ds, ds->index);
636out:
637 dsa_put_dst(dst);
638
639 return err;
640}
641
642int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
643{
644 int err;
645
646 mutex_lock(&dsa2_mutex);
647 err = _dsa_register_switch(ds, np);
648 mutex_unlock(&dsa2_mutex);
649
650 return err;
651}
652EXPORT_SYMBOL_GPL(dsa_register_switch);
653
654void _dsa_unregister_switch(struct dsa_switch *ds)
655{
656 struct dsa_switch_tree *dst = ds->dst;
657
658 dsa_dst_unapply(dst);
659
660 dsa_dst_del_ds(dst, ds, ds->index);
661}
662
663void dsa_unregister_switch(struct dsa_switch *ds)
664{
665 mutex_lock(&dsa2_mutex);
666 _dsa_unregister_switch(ds);
667 mutex_unlock(&dsa2_mutex);
668}
669EXPORT_SYMBOL_GPL(dsa_unregister_switch);