/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * The HSR spec says never to forward the same frame twice on the same
 * interface. A frame is identified by its source MAC address and its HSR
 * sequence number. This code keeps track of senders and their sequence numbers
 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 */

#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"

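/* One entry per node seen on the ring. time_in[] holds the time (in jiffies)
 * of the last valid frame received from the node on each port; seq_out[]
 * holds, per port, the newest HSR sequence number from this node that has
 * been sent out on that port, which is what the duplicate filter compares
 * incoming frames against.
 */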
struct hsr_node {
	struct list_head	mac_list;
	unsigned char		MacAddressA[ETH_ALEN];
	unsigned char		MacAddressB[ETH_ALEN];
	/* Local slave through which AddrB frames are received from this node */
	enum hsr_port_type	AddrB_port;
	unsigned long		time_in[HSR_PT_PORTS];
	bool			time_in_stale[HSR_PT_PORTS];
	u16			seq_out[HSR_PT_PORTS];
	struct rcu_head		rcu_head;
};

/* TODO: use hash lists for mac addresses (linux/jhash.h)? */

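/* Sequence numbers are 16-bit and wrap around, so "after" has to be decided
 * with modular arithmetic. For example, seq_nr_after(5, 65534) is true:
 * (u16)(65534 - 5) = 65529, which is negative when reinterpreted as an s16,
 * so 5 counts as newer than 65534. The 32768 special case below keeps two
 * numbers that are exactly half the sequence space apart from comparing as
 * "after" each other in both directions.
 */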
/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 * false otherwise.
 */
static bool seq_nr_after(u16 a, u16 b)
{
	/* Remove inconsistency where
	 * seq_nr_after(a, b) == seq_nr_before(a, b)
	 */
	if ((int) b - a == 32768)
		return false;

	return (((s16) (b - a)) < 0);
}
#define seq_nr_before(a, b)		seq_nr_after((b), (a))
#define seq_nr_after_or_eq(a, b)	(!seq_nr_before((a), (b)))
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))

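/* Return true if 'addr' is one of this host's own addresses (MacAddressA or
 * MacAddressB), i.e. a frame carrying it as source was originally sent by us
 * and has been looped back over the ring.
 */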
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_node *node;

	node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
				      mac_list);
	if (!node) {
		WARN_ONCE(1, "HSR: No self node\n");
		return false;
	}

	if (ether_addr_equal(addr, node->MacAddressA))
		return true;
	if (ether_addr_equal(addr, node->MacAddressB))
		return true;

	return false;
}

/* Search for mac entry. Caller must hold rcu read lock.
 */
static struct hsr_node *find_node_by_AddrA(struct list_head *node_db,
					   const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->MacAddressA, addr))
			return node;
	}

	return NULL;
}

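/* Note on the update path below: if a self node already exists it is swapped
 * out with list_replace_rcu(), and synchronize_rcu() makes sure any concurrent
 * readers (e.g. hsr_addr_is_self()) are done with the old entry before it is
 * kfree()'d.
 */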
/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
 * frames from self that have been looped over the HSR ring.
 */
int hsr_create_self_node(struct list_head *self_node_db,
			 unsigned char addr_a[ETH_ALEN],
			 unsigned char addr_b[ETH_ALEN])
{
	struct hsr_node *node, *oldnode;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ether_addr_copy(node->MacAddressA, addr_a);
	ether_addr_copy(node->MacAddressB, addr_b);

	rcu_read_lock();
	oldnode = list_first_or_null_rcu(self_node_db,
					 struct hsr_node, mac_list);
	if (oldnode) {
		list_replace_rcu(&oldnode->mac_list, &node->mac_list);
		rcu_read_unlock();
		synchronize_rcu();
		kfree(oldnode);
	} else {
		rcu_read_unlock();
		list_add_tail_rcu(&node->mac_list, self_node_db);
	}

	return 0;
}

/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 */
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
			      u16 seq_out)
{
	struct hsr_node *node;
	unsigned long now;
	int i;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return NULL;

	ether_addr_copy(node->MacAddressA, addr);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning).
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++)
		node->time_in[i] = now;
	for (i = 0; i < HSR_PT_PORTS; i++)
		node->seq_out[i] = seq_out;

	list_add_tail_rcu(&node->mac_list, node_db);

	return node;
}

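/* For tagged frames the new entry below is seeded with the frame's own
 * sequence number minus one, so that the frame which caused the entry to be
 * created still passes the duplicate filter and gets forwarded.
 */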
/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
			      bool is_sup)
{
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *) skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
			return node;
		if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
			return node;
	}

	/* Not found; any node connected to an HSR device may get an entry
	 * created for it here.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
		seq_out = HSR_SEQNR_START;
	}

	return hsr_add_node(node_db, ethhdr->h_source, seq_out);
}

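/* A supervision frame carries the sending node's MacAddressA in its payload.
 * If the frame itself arrived from a different source MAC, that source is the
 * node's MacAddressB; this is what lets the two addresses be tied together
 * below.
 */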
/* Use the Supervision frame's info about a possible MacAddressB to merge
 * nodes that have previously had their MacAddressB registered as a separate
 * node.
 */
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
			  struct hsr_port *port_rcv)
{
	struct ethhdr *ethhdr;
	struct hsr_node *node_real;
	struct hsr_sup_payload *hsr_sp;
	struct list_head *node_db;
	int i;

	ethhdr = (struct ethhdr *) skb_mac_header(skb);

	/* Leave the ethernet header. */
	skb_pull(skb, sizeof(struct ethhdr));

	/* And leave the HSR tag. */
	if (ethhdr->h_proto == htons(ETH_P_HSR))
		skb_pull(skb, sizeof(struct hsr_tag));

	/* And leave the HSR sup tag. */
	skb_pull(skb, sizeof(struct hsr_sup_tag));

	hsr_sp = (struct hsr_sup_payload *) skb->data;

	/* Merge node_curr (registered on MacAddressB) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_AddrA(node_db, hsr_sp->MacAddressA);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(node_db, hsr_sp->MacAddressA,
					 HSR_SEQNR_START - 1);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	ether_addr_copy(node_real->MacAddressB, ethhdr->h_source);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] = node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	node_real->AddrB_port = port_rcv->type;

	list_del_rcu(&node_curr->mac_list);
	kfree_rcu(node_curr, rcu_head);

done:
	skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
}

/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (MacAddressA) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	memcpy(&eth_hdr(skb)->h_source, node->MacAddressA, ETH_ALEN);
}

/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface
 *
 * Substitute the target (dest) MAC address if necessary, so that it matches
 * the recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest);
	if (!node_dst) {
		WARN_ONCE(1, "%s: Unknown node\n", __func__);
		return;
	}
	if (port->type != node_dst->AddrB_port)
		return;

	ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->MacAddressB);
}

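/* Record that a frame with the given sequence number was received from 'node'
 * on 'port'. The time_in stamp kept here is what hsr_prune_nodes() uses for
 * ring error detection and for expiring dead nodes.
 */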
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number. This
	 * ensures entries of restarted nodes get pruned so that they can
	 * re-register and resume communications.
	 */
	if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}

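/* Example: if seq_out for 'port' is 100, a frame from 'node' carrying
 * sequence_nr 100 (or anything before it, modulo wrap-around) returns 1 and
 * is treated as already sent on this port, while sequence_nr 101 returns 0
 * and advances seq_out to 101.
 */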
/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
			   u16 sequence_nr)
{
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]))
		return 1;

	node->seq_out[port->type] = sequence_nr;
	return 0;
}

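/* Return the slave port on which frames from 'node' have stopped arriving (or
 * are lagging more than MAX_SLAVE_DIFF behind the other slave), or NULL if
 * both slaves are still receiving frames from it. Used for ring error
 * reporting.
 */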
static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}

/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 */
void hsr_prune_nodes(unsigned long data)
{
	struct hsr_priv *hsr;
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;

	hsr = (struct hsr_priv *) data;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
		/* Shorthand */
		time_a = node->time_in[HSR_PT_SLAVE_A];
		time_b = node->time_in[HSR_PT_SLAVE_B];

		/* Check for timestamps old enough to risk wrap-around */
		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2))
			node->time_in_stale[HSR_PT_SLAVE_A] = true;
		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2))
			node->time_in_stale[HSR_PT_SLAVE_B] = true;

		/* Get age of newest frame from node.
		 * At least one time_in is OK here; nodes get pruned long
		 * before both time_ins can get stale
		 */
		timestamp = time_a;
		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
		    time_after(time_b, time_a)))
			timestamp = time_b;

		/* Warn of ring error only as long as we get frames at all */
		if (time_is_after_jiffies(timestamp +
				msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) {
			rcu_read_lock();
			port = get_late_port(hsr, node);
			if (port != NULL)
				hsr_nl_ringerror(hsr, node->MacAddressA, port);
			rcu_read_unlock();
		}

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->MacAddressA);
			list_del_rcu(&node->mac_list);
			/* Note that we need to free this entry later: */
			kfree_rcu(node, rcu_head);
		}
	}
	rcu_read_unlock();
}

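/* Iterator over the node table, e.g. for the netlink node-list dump. '_pos'
 * is the node returned by the previous call, or NULL to start from the
 * beginning. Copies the node's MacAddressA into 'addr'. Caller must hold the
 * RCU read lock.
 */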
void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	if (!_pos) {
		node = list_first_or_null_rcu(&hsr->node_db,
					      struct hsr_node, mac_list);
		if (node)
			ether_addr_copy(addr, node->MacAddressA);
		return node;
	}

	node = _pos;
	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
		ether_addr_copy(addr, node->MacAddressA);
		return node;
	}

	return NULL;
}

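/* Fill in status for the node with AddressA 'addr': its AddressB, the ifindex
 * of the slave through which AddressB frames are received (-1 if none), the
 * age in milliseconds of the newest frame seen on each slave, and the most
 * recent sequence numbers (presented as if they were incoming on each slave).
 * Returns -ENOENT if the node is unknown.
 */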
int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;

	rcu_read_lock();
	node = find_node_by_AddrA(&hsr->node_db, addr);
	if (!node) {
		rcu_read_unlock();
		return -ENOENT;	/* No such entry */
	}

	ether_addr_copy(addr_b, node->MacAddressB);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->AddrB_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->AddrB_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	rcu_read_unlock();

	return 0;
}