/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
	[IFLA_HSR_VERSION]		= { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
};

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
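/* From userspace this is typically reached via rtnetlink, e.g. with iproute2
 * (illustration only; the interface names are placeholders and the
 * "supervision" and "version" options are optional):
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 \
 *		supervision 45 version 1
 *
 * The slave ifindexes, the supervision-address byte and the requested
 * protocol version arrive in hsr_newlink() below as the netlink attributes
 * parsed there.
 */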
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *link[2];
	unsigned char multicast_spec, hsr_version;

	if (!data) {
		netdev_info(dev, "HSR: No slave devices specified\n");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		netdev_info(dev, "HSR: Slave1 device not specified\n");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!data[IFLA_HSR_SLAVE2]) {
		netdev_info(dev, "HSR: Slave2 device not specified\n");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));

	if (!link[0] || !link[1])
		return -ENODEV;
	if (link[0] == link[1])
		return -EINVAL;

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (!data[IFLA_HSR_VERSION])
		hsr_version = 0;
	else
		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);

	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}
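
/* .fill_info handler: describes an existing hsr device to rtnetlink by adding
 * the slave ifindexes, the supervision multicast address and the current
 * sequence number to the message (this is the detail that e.g.
 * "ip -details link show" displays for an hsr interface).
 */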
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	int res;

	hsr = netdev_priv(dev);

	res = 0;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.fill_info	= hsr_fill_info,
};

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.netnsok = true,
};

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
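
/* Illustration only (not kernel code): hsr_nl_ringerror() and
 * hsr_nl_nodedown() below multicast their notifications to the "hsr-network"
 * group above. A minimal userspace sketch for listening to them, assuming
 * libnl-3 and the HSR_C_* definitions from the uapi hsr_netlink.h header
 * (error handling omitted):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	int grp;
 *
 *	genl_connect(sk);
 *	grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
 *	nl_socket_disable_seq_check(sk);
 *	nl_socket_add_membership(sk, grp);
 *	// ...then call nl_recvmsgs_default(sk) in a loop and parse the
 *	// HSR_C_RING_ERROR and HSR_C_NODE_DOWN messages.
 */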

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
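/* Illustration only (not kernel code): a minimal sketch of such a query with
 * libnl-3, assuming the HSR_A_ and HSR_C_ values from the uapi hsr_netlink.h
 * header; error handling omitted, "hsr0" and the MAC address are placeholders:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	int fam;
 *
 *	genl_connect(sk);
 *	fam = genl_ctrl_resolve(sk, "HSR");
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    HSR_C_GET_NODE_STATUS, 1);
 *	nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex("hsr0"));
 *	nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, mac);
 *	nl_send_auto(sk, msg);
 *	// The kernel answers with an HSR_C_SET_NODE_STATUS message carrying
 *	// the attributes described above.
 */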
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
			(unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
			hsr_node_addr_b,
			&addr_b_ifindex,
			&hsr_node_if1_age,
			&hsr_node_if1_seq,
			&hsr_node_if2_age,
			&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
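/* Note: the node table can be larger than what fits in one netlink message;
 * when nla_put() below returns -EMSGSIZE the partially filled message is sent
 * and a fresh one is started (see the "restart" label), so the reply may
 * arrive as several HSR_C_SET_NODE_LIST messages.
 */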
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};
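
/* Register the rtnetlink link type ("hsr") and the "HSR" generic netlink
 * family with its multicast group; intended to be called once at module init.
 * On failure, anything registered so far is unregistered again.
 */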
int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family_with_ops_groups(&hsr_genl_family, hsr_ops,
						  hsr_mcgrps);
	if (rc)
		goto fail_genl_register_family;

	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");