blob: 99b8fc4eca6c126809ee1fda49900177f6d8917b [file] [log] [blame]
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02001/* Copyright 2011-2014 Autronica Fire and Security AS
Arvid Brodinf4214362013-10-30 21:10:47 +01002 *
3 * This program is free software; you can redistribute it and/or modify it
4 * under the terms of the GNU General Public License as published by the Free
5 * Software Foundation; either version 2 of the License, or (at your option)
6 * any later version.
7 *
8 * Author(s):
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02009 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Arvid Brodinf4214362013-10-30 21:10:47 +010010 *
11 * In addition to routines for registering and unregistering HSR support, this
12 * file also contains the receive routine that handles all incoming frames with
13 * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
14 */
15
16#include <linux/netdevice.h>
17#include <linux/rculist.h>
18#include <linux/timer.h>
19#include <linux/etherdevice.h>
20#include "hsr_main.h"
21#include "hsr_device.h"
22#include "hsr_netlink.h"
23#include "hsr_framereg.h"
24
25
26/* List of all registered virtual HSR devices */
27static LIST_HEAD(hsr_list);
28
/* Add a newly created HSR instance to the global hsr_list, making it
 * visible to the RCU readers in is_hsr_slave() and get_hsr_master().
 * Caller is expected to hold RTNL (registration path) — TODO confirm.
 */
void register_hsr_master(struct hsr_priv *hsr)
{
	list_add_tail_rcu(&hsr->hsr_list, &hsr_list);
}
33
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020034void unregister_hsr_master(struct hsr_priv *hsr)
Arvid Brodinf4214362013-10-30 21:10:47 +010035{
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020036 struct hsr_priv *hsr_it;
Arvid Brodinf4214362013-10-30 21:10:47 +010037
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020038 list_for_each_entry(hsr_it, &hsr_list, hsr_list)
39 if (hsr_it == hsr) {
40 list_del_rcu(&hsr_it->hsr_list);
Arvid Brodinf4214362013-10-30 21:10:47 +010041 return;
42 }
43}
44
45bool is_hsr_slave(struct net_device *dev)
46{
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020047 struct hsr_priv *hsr_it;
Arvid Brodinf4214362013-10-30 21:10:47 +010048
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020049 list_for_each_entry_rcu(hsr_it, &hsr_list, hsr_list) {
50 if (dev == hsr_it->slave[0])
Arvid Brodinf4214362013-10-30 21:10:47 +010051 return true;
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020052 if (dev == hsr_it->slave[1])
Arvid Brodinf4214362013-10-30 21:10:47 +010053 return true;
54 }
55
56 return false;
57}
58
59
60/* If dev is a HSR slave device, return the virtual master device. Return NULL
61 * otherwise.
62 */
63static struct hsr_priv *get_hsr_master(struct net_device *dev)
64{
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020065 struct hsr_priv *hsr;
Arvid Brodinf4214362013-10-30 21:10:47 +010066
67 rcu_read_lock();
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020068 list_for_each_entry_rcu(hsr, &hsr_list, hsr_list)
69 if ((dev == hsr->slave[0]) ||
70 (dev == hsr->slave[1])) {
Arvid Brodinf4214362013-10-30 21:10:47 +010071 rcu_read_unlock();
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020072 return hsr;
Arvid Brodinf4214362013-10-30 21:10:47 +010073 }
74
75 rcu_read_unlock();
76 return NULL;
77}
78
79
80/* If dev is a HSR slave device, return the other slave device. Return NULL
81 * otherwise.
82 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020083static struct net_device *get_other_slave(struct hsr_priv *hsr,
Arvid Brodinf4214362013-10-30 21:10:47 +010084 struct net_device *dev)
85{
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020086 if (dev == hsr->slave[0])
87 return hsr->slave[1];
88 if (dev == hsr->slave[1])
89 return hsr->slave[0];
Arvid Brodinf4214362013-10-30 21:10:47 +010090
91 return NULL;
92}
93
94
/* Netdevice event handler: reacts to state changes on either a slave
 * device or the HSR master device itself, keeping the master's carrier,
 * operstate, MAC address and MTU consistent with its slaves.
 * Returns NOTIFY_DONE, or NOTIFY_BAD to veto a slave type change.
 */
static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct net_device *slave, *other_slave;
	struct hsr_priv *hsr;
	int old_operstate;
	int mtu_max;
	int res;
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	hsr = get_hsr_master(dev);
	if (hsr) {
		/* dev is a slave device */
		slave = dev;
		other_slave = get_other_slave(hsr, slave);
	} else {
		/* Not a slave; only continue if dev is an HSR master */
		if (!is_hsr_master(dev))
			return NOTIFY_DONE;
		hsr = netdev_priv(dev);
		slave = hsr->slave[0];
		other_slave = hsr->slave[1];
	}

	switch (event) {
	case NETDEV_UP:		/* Administrative state UP */
	case NETDEV_DOWN:	/* Administrative state DOWN */
	case NETDEV_CHANGE:	/* Link (carrier) state changes */
		old_operstate = hsr->dev->operstate;
		hsr_set_carrier(hsr->dev, slave, other_slave);
		/* netif_stacked_transfer_operstate() cannot be used here since
		 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
		 */
		hsr_set_operstate(hsr->dev, slave, other_slave);
		hsr_check_announce(hsr->dev, old_operstate);
		break;
	case NETDEV_CHANGEADDR:

		/* This should not happen since there's no ndo_set_mac_address()
		 * for HSR devices - i.e. not supported.
		 */
		if (dev == hsr->dev)
			break;

		/* Slave A's address is mirrored onto the master device */
		if (dev == hsr->slave[0])
			ether_addr_copy(hsr->dev->dev_addr,
					hsr->slave[0]->dev_addr);

		/* Make sure we recognize frames from ourselves in hsr_rcv() */
		res = hsr_create_self_node(&hsr->self_node_db,
					   hsr->dev->dev_addr,
					   hsr->slave[1] ?
						hsr->slave[1]->dev_addr :
						hsr->dev->dev_addr);
		if (res)
			netdev_warn(hsr->dev,
				    "Could not update HSR node address.\n");

		/* Propagate the (mirrored) address change to listeners on
		 * the master device as well
		 */
		if (dev == hsr->slave[0])
			call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr->dev);
		break;
	case NETDEV_CHANGEMTU:
		if (dev == hsr->dev)
			break; /* Handled in ndo_change_mtu() */
		/* Shrink the master MTU if the slave change lowered the
		 * largest frame the ring can carry
		 */
		mtu_max = hsr_get_max_mtu(hsr);
		if (hsr->dev->mtu > mtu_max)
			dev_set_mtu(hsr->dev, mtu_max);
		break;
	case NETDEV_UNREGISTER:
		if (dev == hsr->slave[0])
			hsr->slave[0] = NULL;
		if (dev == hsr->slave[1])
			hsr->slave[1] = NULL;

		/* There should really be a way to set a new slave device... */

		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* HSR works only on Ethernet devices. Refuse slave to change
		 * its type.
		 */
		return NOTIFY_BAD;
	}

	return NOTIFY_DONE;
}
182
183
184static struct timer_list prune_timer;
185
186static void prune_nodes_all(unsigned long data)
187{
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200188 struct hsr_priv *hsr;
Arvid Brodinf4214362013-10-30 21:10:47 +0100189
190 rcu_read_lock();
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200191 list_for_each_entry_rcu(hsr, &hsr_list, hsr_list)
192 hsr_prune_nodes(hsr);
Arvid Brodinf4214362013-10-30 21:10:47 +0100193 rcu_read_unlock();
194
195 prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
196 add_timer(&prune_timer);
197}
198
199
200static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
201{
202 struct hsr_tag *hsr_tag;
203 struct sk_buff *skb2;
204
205 skb2 = skb_share_check(skb, GFP_ATOMIC);
206 if (unlikely(!skb2))
207 goto err_free;
208 skb = skb2;
209
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200210 if (unlikely(!pskb_may_pull(skb, HSR_HLEN)))
Arvid Brodinf4214362013-10-30 21:10:47 +0100211 goto err_free;
212
213 hsr_tag = (struct hsr_tag *) skb->data;
214 skb->protocol = hsr_tag->encap_proto;
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200215 skb_pull(skb, HSR_HLEN);
Arvid Brodinf4214362013-10-30 21:10:47 +0100216
217 return skb;
218
219err_free:
220 kfree_skb(skb);
221 return NULL;
222}
223
224
225/* The uses I can see for these HSR supervision frames are:
226 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
227 * 22") to reset any sequence_nr counters belonging to that node. Useful if
228 * the other node's counter has been reset for some reason.
229 * --
230 * Or not - resetting the counter and bridging the frame would create a
231 * loop, unfortunately.
232 *
233 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
234 * frame is received from a particular node, we know something is wrong.
235 * We just register these (as with normal frames) and throw them away.
236 *
237 * 3) Allow different MAC addresses for the two slave interfaces, using the
238 * MacAddressA field.
239 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200240static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
Arvid Brodinf4214362013-10-30 21:10:47 +0100241{
242 struct hsr_sup_tag *hsr_stag;
243
244 if (!ether_addr_equal(eth_hdr(skb)->h_dest,
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200245 hsr->sup_multicast_addr))
Arvid Brodinf4214362013-10-30 21:10:47 +0100246 return false;
247
248 hsr_stag = (struct hsr_sup_tag *) skb->data;
249 if (get_hsr_stag_path(hsr_stag) != 0x0f)
250 return false;
251 if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
252 (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
253 return false;
254 if (hsr_stag->HSR_TLV_Length != 12)
255 return false;
256
257 return true;
258}
259
260
/* Implementation somewhat according to IEC-62439-3, p. 43
 *
 * Receive handler for ETH_P_PRP frames arriving on a slave interface.
 * Decides, per frame, whether to (a) deliver a de-tagged copy up the
 * stack through the master device, (b) forward the tagged frame out the
 * other slave to keep the ring circulating, both, or neither (duplicate
 * or self-originated frames are dropped). All duplicate detection is
 * done via the per-node frame-out/frame-in bookkeeping in hsr_framereg.
 */
static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct hsr_priv *hsr;
	struct net_device *other_slave;
	struct hsr_node *node;
	bool deliver_to_self;
	struct sk_buff *skb_deliver;
	enum hsr_dev_idx dev_in_idx, dev_other_idx;
	bool dup_out;
	int ret;

	hsr = get_hsr_master(dev);

	if (!hsr) {
		/* Non-HSR-slave device 'dev' is connected to a HSR network */
		kfree_skb(skb);
		dev->stats.rx_errors++;
		return NET_RX_SUCCESS;
	}

	/* Map the receiving slave to its registry index; the companion
	 * index is where this frame would be forwarded out.
	 */
	if (dev == hsr->slave[0]) {
		dev_in_idx = HSR_DEV_SLAVE_A;
		dev_other_idx = HSR_DEV_SLAVE_B;
	} else {
		dev_in_idx = HSR_DEV_SLAVE_B;
		dev_other_idx = HSR_DEV_SLAVE_A;
	}

	node = hsr_find_node(&hsr->self_node_db, skb);
	if (node) {
		/* Always kill frames sent by ourselves */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* Is this frame a candidate for local reception? */
	deliver_to_self = false;
	if ((skb->pkt_type == PACKET_HOST) ||
	    (skb->pkt_type == PACKET_MULTICAST) ||
	    (skb->pkt_type == PACKET_BROADCAST))
		deliver_to_self = true;
	else if (ether_addr_equal(eth_hdr(skb)->h_dest,
				  hsr->dev->dev_addr)) {
		/* Unicast to the master's address but not flagged as such
		 * by the slave's filter; fix up the packet type.
		 */
		skb->pkt_type = PACKET_HOST;
		deliver_to_self = true;
	}


	rcu_read_lock(); /* node_db */
	node = hsr_find_node(&hsr->node_db, skb);

	if (is_supervision_frame(hsr, skb)) {
		/* Temporarily expose the payload so the node registry can
		 * read MacAddressA etc.; restored via skb_push() below.
		 */
		skb_pull(skb, sizeof(struct hsr_sup_tag));
		node = hsr_merge_node(hsr, node, skb, dev_in_idx);
		if (!node) {
			rcu_read_unlock(); /* node_db */
			kfree_skb(skb);
			hsr->dev->stats.rx_dropped++;
			return NET_RX_DROP;
		}
		skb_push(skb, sizeof(struct hsr_sup_tag));
		/* Supervision frames are registered and forwarded, never
		 * delivered locally.
		 */
		deliver_to_self = false;
	}

	if (!node) {
		/* Source node unknown; this might be a HSR frame from
		 * another net (different multicast address). Ignore it.
		 */
		rcu_read_unlock(); /* node_db */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* Register ALL incoming frames as outgoing through the other interface.
	 * This allows us to register frames as incoming only if they are valid
	 * for the receiving interface, without using a specific counter for
	 * incoming frames.
	 */
	dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
	if (!dup_out)
		hsr_register_frame_in(node, dev_in_idx);

	/* Forward this frame? (never forward duplicates, nor frames that
	 * were unicast to us)
	 */
	if (!dup_out && (skb->pkt_type != PACKET_HOST))
		other_slave = get_other_slave(hsr, dev);
	else
		other_slave = NULL;

	/* Already handed an identical frame up the stack? */
	if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
		deliver_to_self = false;

	rcu_read_unlock(); /* node_db */

	if (!deliver_to_self && !other_slave) {
		kfree_skb(skb);
		/* Circulated frame; silently remove it. */
		return NET_RX_SUCCESS;
	}

	skb_deliver = skb;
	if (deliver_to_self && other_slave) {
		/* skb_clone() is not enough since we will strip the hsr tag
		 * and do address substitution below
		 */
		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
		if (!skb_deliver) {
			/* Copy failed: still forward the original below */
			deliver_to_self = false;
			hsr->dev->stats.rx_dropped++;
		}
	}

	if (deliver_to_self) {
		bool multicast_frame;

		skb_deliver = hsr_pull_tag(skb_deliver);
		if (!skb_deliver) {
			hsr->dev->stats.rx_dropped++;
			goto forward;
		}
#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/* Move everything in the header that is after the HSR tag,
		 * to work around alignment problems caused by the 6-byte HSR
		 * tag. In practice, this removes/overwrites the HSR tag in
		 * the header and restores a "standard" packet.
		 */
		memmove(skb_deliver->data - HSR_HLEN, skb_deliver->data,
			skb_headlen(skb_deliver));

		/* Adjust skb members so they correspond with the move above.
		 * This cannot possibly underflow skb->data since hsr_pull_tag()
		 * above succeeded.
		 * At this point in the protocol stack, the transport and
		 * network headers have not been set yet, and we haven't touched
		 * the mac header nor the head. So we only need to adjust data
		 * and tail:
		 */
		skb_deliver->data -= HSR_HLEN;
		skb_deliver->tail -= HSR_HLEN;
#endif
		skb_deliver->dev = hsr->dev;
		hsr_addr_subst_source(hsr, skb_deliver);
		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
		ret = netif_rx(skb_deliver);
		if (ret == NET_RX_DROP) {
			hsr->dev->stats.rx_dropped++;
		} else {
			hsr->dev->stats.rx_packets++;
			hsr->dev->stats.rx_bytes += skb->len;
			if (multicast_frame)
				hsr->dev->stats.multicast++;
		}
	}

forward:
	if (other_slave) {
		/* Re-expose the Ethernet header and send the still-tagged
		 * frame out the companion slave.
		 */
		skb_push(skb, ETH_HLEN);
		skb->dev = other_slave;
		dev_queue_xmit(skb);
	}

	return NET_RX_SUCCESS;
}
426
427
/* Protocol handler: all incoming ETH_P_PRP (HSRv0) frames go to hsr_rcv() */
static struct packet_type hsr_pt __read_mostly = {
	.type = htons(ETH_P_PRP),
	.func = hsr_rcv,
};

static struct notifier_block hsr_nb = {
	.notifier_call = hsr_netdev_notify, /* Slave event notifications */
};
436
437
438static int __init hsr_init(void)
439{
440 int res;
441
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200442 BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
Arvid Brodinf4214362013-10-30 21:10:47 +0100443
444 dev_add_pack(&hsr_pt);
445
446 init_timer(&prune_timer);
447 prune_timer.function = prune_nodes_all;
448 prune_timer.data = 0;
449 prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
450 add_timer(&prune_timer);
451
452 register_netdevice_notifier(&hsr_nb);
453
454 res = hsr_netlink_init();
455
456 return res;
457}
458
/* Module exit: tear down in reverse-dependency order. The notifier is
 * unregistered before the timer is stopped, and del_timer_sync() waits
 * for a running prune_nodes_all() (which re-arms itself) to finish, so
 * no callback can requeue the timer after this point.
 */
static void __exit hsr_exit(void)
{
	unregister_netdevice_notifier(&hsr_nb);
	del_timer_sync(&prune_timer);
	hsr_netlink_exit();
	dev_remove_pack(&hsr_pt);
}
466
467module_init(hsr_init);
468module_exit(hsr_exit);
469MODULE_LICENSE("GPL");