/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 */

#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include "hsr_main.h"
#include "hsr_framereg.h"


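/* Strip the HSR tag from the start of the frame payload and restore the
 * encapsulated protocol in skb->protocol. The tag is HSR_HLEN (6) bytes:
 * path id plus LSDU size, sequence number, and the encapsulated ethertype
 * (see struct hsr_tag). Returns the (possibly reallocated) skb on success,
 * or NULL if the tag could not be pulled; the skb has then been freed.
 */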
static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
{
	struct hsr_tag *hsr_tag;
	struct sk_buff *skb2;

	skb2 = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb2))
		return NULL; /* skb_share_check() has already freed skb */
	skb = skb2;

	if (unlikely(!pskb_may_pull(skb, HSR_HLEN)))
		goto err_free;

	hsr_tag = (struct hsr_tag *) skb->data;
	skb->protocol = hsr_tag->encap_proto;
	skb_pull(skb, HSR_HLEN);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}


/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
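/* A supervision frame is identified by its destination address (the
 * configured supervision multicast address), a path identifier of 0x0f in
 * the supervision tag, a known TLV type (ANNOUNCE or LIFE_CHECK), and the
 * fixed TLV length of 12.
 */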
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct hsr_sup_tag *hsr_stag;

	if (!ether_addr_equal(eth_hdr(skb)->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Make sure the supervision tag is within the skb's linear data
	 * before dereferencing it below.
	 */
	if (!pskb_may_pull(skb, sizeof(struct hsr_sup_tag)))
		return false;

	hsr_stag = (struct hsr_sup_tag *) skb->data;
	if (get_hsr_stag_path(hsr_stag) != 0x0f)
		return false;
	if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
	    (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
		return false;
	if (hsr_stag->HSR_TLV_Length != 12)
		return false;

	return true;
}


/* Implementation somewhat according to IEC-62439-3, p. 43
 */
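/* rx_handler for frames arriving on an HSR slave device. Returns
 * RX_HANDLER_PASS for frames we cannot handle (wrong ethertype, or no HSR
 * master registered), and RX_HANDLER_CONSUMED for everything else - whether
 * the frame is dropped, delivered locally through the master device,
 * forwarded to the other slave, or both delivered and forwarded.
 */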
rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct hsr_priv *hsr;
	struct net_device *other_slave;
	struct hsr_node *node;
	bool deliver_to_self;
	struct sk_buff *skb_deliver;
	enum hsr_dev_idx dev_in_idx, dev_other_idx;
	bool dup_out;
	int ret;

	if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
		return RX_HANDLER_PASS;

	hsr = get_hsr_master(dev);
	if (!hsr) {
		WARN_ON_ONCE(1);
		return RX_HANDLER_PASS;
	}

	if (dev == hsr->slave[0]) {
		dev_in_idx = HSR_DEV_SLAVE_A;
		dev_other_idx = HSR_DEV_SLAVE_B;
	} else {
		dev_in_idx = HSR_DEV_SLAVE_B;
		dev_other_idx = HSR_DEV_SLAVE_A;
	}

	node = hsr_find_node(&hsr->self_node_db, skb);
	if (node) {
		/* Always kill frames sent by ourselves */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Is this frame a candidate for local reception? */
	deliver_to_self = false;
	if ((skb->pkt_type == PACKET_HOST) ||
	    (skb->pkt_type == PACKET_MULTICAST) ||
	    (skb->pkt_type == PACKET_BROADCAST))
		deliver_to_self = true;
	else if (ether_addr_equal(eth_hdr(skb)->h_dest, hsr->dev->dev_addr)) {
		skb->pkt_type = PACKET_HOST;
		deliver_to_self = true;
	}

	rcu_read_lock(); /* node_db */
	node = hsr_find_node(&hsr->node_db, skb);

	if (is_supervision_frame(hsr, skb)) {
		skb_pull(skb, sizeof(struct hsr_sup_tag));
		node = hsr_merge_node(hsr, node, skb, dev_in_idx);
		if (!node) {
			rcu_read_unlock(); /* node_db */
			kfree_skb(skb);
			hsr->dev->stats.rx_dropped++;
			return RX_HANDLER_CONSUMED;
		}
		skb_push(skb, sizeof(struct hsr_sup_tag));
		/* Supervision frames are only merged into the node table;
		 * they must not be delivered up through the master device,
		 * but they do continue around the ring via the forwarding
		 * logic below.
		 */
		deliver_to_self = false;
	}

	if (!node) {
		/* Source node unknown; this might be an HSR frame from
		 * another net (different multicast address). Ignore it.
		 */
		rcu_read_unlock(); /* node_db */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Register ALL incoming frames as outgoing through the other interface.
	 * This allows us to register frames as incoming only if they are valid
	 * for the receiving interface, without using a specific counter for
	 * incoming frames.
	 */
	dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
	if (!dup_out)
		hsr_register_frame_in(node, dev_in_idx);

	/* Forward this frame? */
	if (!dup_out && (skb->pkt_type != PACKET_HOST))
		other_slave = get_other_slave(hsr, dev);
	else
		other_slave = NULL;

	if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
		deliver_to_self = false;

	rcu_read_unlock(); /* node_db */

	if (!deliver_to_self && !other_slave) {
		/* Circulated frame; silently remove it. */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	skb_deliver = skb;
	if (deliver_to_self && other_slave) {
		/* skb_clone() is not enough since we will strip the HSR tag
		 * and do address substitution below.
		 */
		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
		if (!skb_deliver) {
			deliver_to_self = false;
			hsr->dev->stats.rx_dropped++;
		}
	}

	if (deliver_to_self) {
		bool multicast_frame;
		unsigned int len;

		skb_deliver = hsr_pull_tag(skb_deliver);
		if (!skb_deliver) {
			hsr->dev->stats.rx_dropped++;
			goto forward;
		}
#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/* Move everything in the header that is after the HSR tag,
		 * to work around alignment problems caused by the 6-byte HSR
		 * tag. In practice, this removes/overwrites the HSR tag in
		 * the header and restores a "standard" packet.
		 */
		memmove(skb_deliver->data - HSR_HLEN, skb_deliver->data,
			skb_headlen(skb_deliver));

		/* Adjust skb members so they correspond with the move above.
		 * This cannot possibly underflow skb->data since hsr_pull_tag()
		 * above succeeded.
		 * At this point in the protocol stack, the transport and
		 * network headers have not been set yet, and we haven't touched
		 * the mac header nor the head. So we only need to adjust data
		 * and tail:
		 */
		skb_deliver->data -= HSR_HLEN;
		skb_deliver->tail -= HSR_HLEN;
#endif
		skb_deliver->dev = hsr->dev;
		hsr_addr_subst_source(hsr, skb_deliver);
		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
		/* Snapshot the stats input before netif_rx(); ownership of
		 * the skb passes to the stack there, so it must not be
		 * touched afterwards.
		 */
		len = skb_deliver->len;
		ret = netif_rx(skb_deliver);
		if (ret == NET_RX_DROP) {
			hsr->dev->stats.rx_dropped++;
		} else {
			hsr->dev->stats.rx_packets++;
			hsr->dev->stats.rx_bytes += len;
			if (multicast_frame)
				hsr->dev->stats.multicast++;
		}
	}

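	/* Forward the frame unmodified to the other slave, unless it was a
	 * duplicate or was addressed exclusively to this host: skb_push()
	 * restores the Ethernet header that eth_type_trans() pulled on
	 * receive, so the HSR tag goes back onto the ring intact.
	 */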
forward:
	if (other_slave) {
		skb_push(skb, ETH_HLEN);
		skb->dev = other_slave;
		dev_queue_xmit(skb);
	}

	return RX_HANDLER_CONSUMED;
}