/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR]	= { .type = NLA_BINARY, .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
};


/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *link[2];
	unsigned char multicast_spec;

	if (!data[IFLA_HSR_SLAVE1]) {
		netdev_info(dev, "IFLA_HSR_SLAVE1 missing!\n");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!data[IFLA_HSR_SLAVE2]) {
		netdev_info(dev, "IFLA_HSR_SLAVE2 missing!\n");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));

	if (!link[0] || !link[1])
		return -ENODEV;
	if (link[0] == link[1])
		return -EINVAL;

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	return hsr_dev_finalize(dev, link, multicast_spec);
}
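
/* For reference, an iproute2 build that understands the HSR link type
 * typically exercises the newlink path above with something like (a sketch;
 * exact option names depend on the iproute2 version):
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1
 *
 * where the two slave devices end up in IFLA_HSR_SLAVE1/IFLA_HSR_SLAVE2, and
 * an optional last byte of the supervision multicast address would be carried
 * in IFLA_HSR_MULTICAST_SPEC.
 */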

/* Report the HSR configuration (slave interfaces, supervision address and
 * sequence number) back to rtnetlink.
 */
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct net_device *slave;
	int res;

	hsr = netdev_priv(dev);

	res = 0;

	rcu_read_lock();
	slave = hsr->slave[0];
	if (slave)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, slave->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	rcu_read_lock();
	slave = hsr->slave[1];
	if (slave)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, slave->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.fill_info	= hsr_fill_info,
};


/* attribute policy */
/* NLA_BINARY missing in libnl; use NLA_UNSPEC in userspace instead. */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR]	= { .type = NLA_BINARY, .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B]	= { .type = NLA_BINARY, .len = ETH_ALEN },
	[HSR_A_IFINDEX]		= { .type = NLA_U32 },
	[HSR_A_IF1_AGE]		= { .type = NLA_U32 },
	[HSR_A_IF2_AGE]		= { .type = NLA_U32 },
	[HSR_A_IF1_SEQ]		= { .type = NLA_U16 },
	[HSR_A_IF2_SEQ]		= { .type = NLA_U16 },
};
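
/* As noted above, libnl has no NLA_BINARY, so a userspace mirror of this
 * policy would typically use NLA_UNSPEC with explicit length limits instead;
 * a sketch (assuming libnl's struct nla_policy with .minlen/.maxlen):
 *
 *	[HSR_A_NODE_ADDR] = { .type = NLA_UNSPEC,
 *			      .minlen = ETH_ALEN, .maxlen = ETH_ALEN },
 */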

static struct genl_family hsr_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
};

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
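
/* Userspace listeners for the HSR_C_RING_ERROR/HSR_C_NODE_DOWN notifications
 * below join this multicast group; with libnl-3 that is roughly (socket setup
 * and error handling omitted):
 *
 *	int grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
 *	nl_socket_add_membership(sk, grp);
 */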

/* This is called if, for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      enum hsr_dev_idx dev_idx)
{
	struct sk_buff *skb;
	struct net_device *slave;
	void *msg_head;
	int res;
	int ifindex;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_lock();
	slave = hsr->slave[dev_idx];
	if (slave)
		ifindex = slave->ifindex;
	else
		ifindex = -1;
	rcu_read_unlock();

	res = nla_put_u32(skb, HSR_A_IFINDEX, ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	netdev_warn(hsr->dev, "Could not send HSR ring error message\n");
}
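
/* The HSR_C_RING_ERROR notification sent above thus carries HSR_A_NODE_ADDR
 * (MacAddressA of the affected node) and HSR_A_IFINDEX (the ifindex of the
 * slave selected by dev_idx, or -1 if that slave is gone).
 */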

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	netdev_warn(hsr->dev, "Could not send HSR node down\n");
}
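
/* The HSR_C_NODE_DOWN notification carries only HSR_A_NODE_ADDR; like the
 * ring error above, it is sent to the "hsr-network" multicast group.
 */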

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev, *slave;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */

	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
			(unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
			hsr_node_addr_b,
			&addr_b_ifindex,
			&hsr_node_if1_age,
			&hsr_node_if1_seq,
			&hsr_node_if2_age,
			&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	slave = hsr->slave[0];
	if (slave)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, slave->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	slave = hsr->slave[1];
	if (slave)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, slave->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
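
/* A matching HSR_C_GET_NODE_STATUS request can be built in userspace roughly
 * as follows (a libnl-3 sketch; sk, family_id, hsr_ifindex and node_mac are
 * placeholders, error handling omitted):
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, 0, 0,
 *		    HSR_C_GET_NODE_STATUS, 1);
 *	nla_put_u32(msg, HSR_A_IFINDEX, hsr_ifindex);
 *	nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, node_mac);
 *	nl_send_auto(sk, msg);
 */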

/* Get a list of MacAddressA of all nodes known to this node (other than self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	void *pos;
	unsigned char addr[ETH_ALEN];
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */

	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);

	rcu_read_lock();
	pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
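
/* The HSR_C_SET_NODE_LIST reply built above echoes HSR_A_IFINDEX and then
 * repeats HSR_A_NODE_ADDR once per known node; userspace simply iterates over
 * the returned attributes to recover the list.
 */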

static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family_with_ops_groups(&hsr_genl_family, hsr_ops,
						   hsr_mcgrps);
	if (rc)
		goto fail_genl_register_family;

	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");