/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 */

#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

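/* Check that dev is usable as an HSR slave interface. Returns 0 on success
 * or -EINVAL if the device cannot be enslaved.
 */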
static int check_slave_ok(struct net_device *dev)
{
	/* Don't allow HSR on non-ethernet like devices */
	if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
	    (dev->addr_len != ETH_ALEN)) {
		netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
		return -EINVAL;
	}

	/* Don't allow enslaving HSR devices */
	if (is_hsr_master(dev)) {
		netdev_info(dev, "Cannot create trees of HSR devices.\n");
		return -EINVAL;
	}

	if (is_hsr_slave(dev)) {
		netdev_info(dev, "This device is already an HSR slave.\n");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
		return -EINVAL;
	}

	/* HSR over bonded devices has not been tested; it may or may not
	 * work.
	 */

	return 0;
}

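/* Strip the HSR tag from skb and restore skb->protocol from the encapsulated
 * protocol. The skb may be replaced by an unshared copy; returns NULL (and
 * frees the frame) if that copy or the header pull fails.
 */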
static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
{
	struct hsr_tag *hsr_tag;
	struct sk_buff *skb2;

	skb2 = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb2))
		goto err_free;
	skb = skb2;

	if (unlikely(!pskb_may_pull(skb, HSR_HLEN)))
		goto err_free;

	hsr_tag = (struct hsr_tag *) skb->data;
	skb->protocol = hsr_tag->encap_proto;
	skb_pull(skb, HSR_HLEN);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}


/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct hsr_sup_tag *hsr_stag;

	if (!ether_addr_equal(eth_hdr(skb)->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	hsr_stag = (struct hsr_sup_tag *) skb->data;
	if (get_hsr_stag_path(hsr_stag) != 0x0f)
		return false;
	if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
	    (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
		return false;
	if (hsr_stag->HSR_TLV_Length != 12)
		return false;

	return true;
}


/* Implementation somewhat according to IEC-62439-3, p. 43
 */
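/* rx handler registered on both slave interfaces. Frames sent by this node
 * itself are dropped. All other frames are checked against the node table so
 * that each frame is delivered to the hsr master device and forwarded on the
 * other slave interface at most once.
 */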
rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct hsr_priv *hsr;
	struct net_device *other_slave;
	struct hsr_node *node;
	bool deliver_to_self;
	struct sk_buff *skb_deliver;
	enum hsr_dev_idx dev_in_idx, dev_other_idx;
	bool dup_out;
	int ret;

	if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
		return RX_HANDLER_PASS;

	hsr = get_hsr_master(dev);
	if (!hsr) {
		WARN_ON_ONCE(1);
		return RX_HANDLER_PASS;
	}

	if (dev == hsr->slave[0]) {
		dev_in_idx = HSR_DEV_SLAVE_A;
		dev_other_idx = HSR_DEV_SLAVE_B;
	} else {
		dev_in_idx = HSR_DEV_SLAVE_B;
		dev_other_idx = HSR_DEV_SLAVE_A;
	}

	node = hsr_find_node(&hsr->self_node_db, skb);
	if (node) {
		/* Always kill frames sent by ourselves */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Is this frame a candidate for local reception? */
	deliver_to_self = false;
	if ((skb->pkt_type == PACKET_HOST) ||
	    (skb->pkt_type == PACKET_MULTICAST) ||
	    (skb->pkt_type == PACKET_BROADCAST))
		deliver_to_self = true;
	else if (ether_addr_equal(eth_hdr(skb)->h_dest, hsr->dev->dev_addr)) {
		skb->pkt_type = PACKET_HOST;
		deliver_to_self = true;
	}

	rcu_read_lock(); /* node_db */
	node = hsr_find_node(&hsr->node_db, skb);

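	/* Supervision frames update the node table but are never delivered
	 * locally; like other frames they are still forwarded on the other
	 * slave interface below, unless they are duplicates.
	 */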
	if (is_supervision_frame(hsr, skb)) {
		skb_pull(skb, sizeof(struct hsr_sup_tag));
		node = hsr_merge_node(hsr, node, skb, dev_in_idx);
		if (!node) {
			rcu_read_unlock(); /* node_db */
			kfree_skb(skb);
			hsr->dev->stats.rx_dropped++;
			return RX_HANDLER_CONSUMED;
		}
		skb_push(skb, sizeof(struct hsr_sup_tag));
		deliver_to_self = false;
	}

	if (!node) {
		/* Source node unknown; this might be an HSR frame from
		 * another net (different multicast address). Ignore it.
		 */
		rcu_read_unlock(); /* node_db */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Register ALL incoming frames as outgoing through the other interface.
	 * This allows us to register frames as incoming only if they are valid
	 * for the receiving interface, without using a specific counter for
	 * incoming frames.
	 */
	dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
	if (!dup_out)
		hsr_register_frame_in(node, dev_in_idx);

	/* Forward this frame? */
	if (!dup_out && (skb->pkt_type != PACKET_HOST))
		other_slave = get_other_slave(hsr, dev);
	else
		other_slave = NULL;

	if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
		deliver_to_self = false;

	rcu_read_unlock(); /* node_db */

	if (!deliver_to_self && !other_slave) {
		kfree_skb(skb);
		/* Circulated frame; silently remove it. */
		return RX_HANDLER_CONSUMED;
	}

	skb_deliver = skb;
	if (deliver_to_self && other_slave) {
		/* skb_clone() is not enough since we will strip the hsr tag
		 * and do address substitution below
		 */
		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
		if (!skb_deliver) {
			deliver_to_self = false;
			hsr->dev->stats.rx_dropped++;
		}
	}

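	/* Local reception: strip the HSR tag and hand the frame to the
	 * network stack via the hsr master device.
	 */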
	if (deliver_to_self) {
		bool multicast_frame;
		unsigned int deliver_len;

		skb_deliver = hsr_pull_tag(skb_deliver);
		if (!skb_deliver) {
			hsr->dev->stats.rx_dropped++;
			goto forward;
		}
#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/* Move everything in the header that is after the HSR tag,
		 * to work around alignment problems caused by the 6-byte HSR
		 * tag. In practice, this removes/overwrites the HSR tag in
		 * the header and restores a "standard" packet.
		 */
		memmove(skb_deliver->data - HSR_HLEN, skb_deliver->data,
			skb_headlen(skb_deliver));

		/* Adjust skb members so they correspond with the move above.
		 * This cannot possibly underflow skb->data since hsr_pull_tag()
		 * above succeeded.
		 * At this point in the protocol stack, the transport and
		 * network headers have not been set yet, and we haven't touched
		 * the mac header nor the head. So we only need to adjust data
		 * and tail:
		 */
		skb_deliver->data -= HSR_HLEN;
		skb_deliver->tail -= HSR_HLEN;
#endif
		skb_deliver->dev = hsr->dev;
		hsr_addr_subst_source(hsr, skb_deliver);
		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
		/* Sample the length before netif_rx() takes over skb_deliver,
		 * which may be the original frame if no copy was made above.
		 */
		deliver_len = skb_deliver->len;
		ret = netif_rx(skb_deliver);
		if (ret == NET_RX_DROP) {
			hsr->dev->stats.rx_dropped++;
		} else {
			hsr->dev->stats.rx_packets++;
			hsr->dev->stats.rx_bytes += deliver_len;
			if (multicast_frame)
				hsr->dev->stats.multicast++;
		}
	}

forward:
	if (other_slave) {
		skb_push(skb, ETH_HLEN);
		skb->dev = other_slave;
		dev_queue_xmit(skb);
	}

	return RX_HANDLER_CONSUMED;
}

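/* Attach dev as slave interface number idx of the hsr device: enable
 * promiscuous mode, register hsr_handle_frame() as its rx handler, and
 * adjust the hsr device's hard_header_len and MTU accordingly. Holds a
 * reference to dev until hsr_del_slave(), or releases it here on failure.
 */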
int hsr_add_slave(struct hsr_priv *hsr, struct net_device *dev, int idx)
{
	int res;

	dev_hold(dev);

	res = check_slave_ok(dev);
	if (res)
		goto fail;

	res = dev_set_promiscuity(dev, 1);
	if (res)
		goto fail;

	res = netdev_rx_handler_register(dev, hsr_handle_frame, hsr);
	if (res)
		goto fail_rx_handler;

	hsr->slave[idx] = dev;

	/* Set required header length */
	if (dev->hard_header_len + HSR_HLEN > hsr->dev->hard_header_len)
		hsr->dev->hard_header_len = dev->hard_header_len + HSR_HLEN;

	dev_set_mtu(hsr->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_rx_handler:
	dev_set_promiscuity(dev, -1);

fail:
	dev_put(dev);
	return res;
}

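/* Detach slave interface number idx from the hsr device: unregister the rx
 * handler, leave promiscuous mode and drop the reference taken in
 * hsr_add_slave().
 */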
void hsr_del_slave(struct hsr_priv *hsr, int idx)
{
	struct net_device *slave;

	slave = hsr->slave[idx];
	hsr->slave[idx] = NULL;

	netdev_update_features(hsr->dev);
	dev_set_mtu(hsr->dev, hsr_get_max_mtu(hsr));

	if (slave) {
		netdev_rx_handler_unregister(slave);
		dev_set_promiscuity(slave, -1);
	}

	synchronize_rcu();

	/* Only drop the reference if there actually was a slave to release */
	if (slave)
		dev_put(slave);
}