/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
};
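
/* For reference, the attributes above are what arrives in IFLA_INFO_DATA when
 * an HSR interface is created over rtnetlink. With iproute2 this corresponds
 * roughly to a command like the following (a sketch only; "hsr0", "eth0" and
 * "eth1" are example names, and the exact "supervision"/"version" keywords
 * depend on the iproute2 version in use):
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 \
 *		supervision 45 version 1
 *
 * "supervision" selects the last byte of the supervision multicast address
 * (IFLA_HSR_MULTICAST_SPEC) and "version" maps to IFLA_HSR_VERSION.
 */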


/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *link[2];
	unsigned char multicast_spec, hsr_version;

	if (!data) {
		netdev_info(dev, "HSR: No slave devices specified\n");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		netdev_info(dev, "HSR: Slave1 device not specified\n");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!data[IFLA_HSR_SLAVE2]) {
		netdev_info(dev, "HSR: Slave2 device not specified\n");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));

	if (!link[0] || !link[1])
		return -ENODEV;
	if (link[0] == link[1])
		return -EINVAL;

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (!data[IFLA_HSR_VERSION]) {
		hsr_version = 0;
	} else {
		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (hsr_version > 1) {
			netdev_info(dev, "Only versions 0..1 are supported");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	int res;

	hsr = netdev_priv(dev);

	res = 0;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
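
/* The attributes filled in above end up nested under IFLA_LINKINFO /
 * IFLA_INFO_DATA in RTM_NEWLINK dumps for the hsr master device, so a rough
 * sketch of what userspace (e.g. "ip -d link show") can read back is:
 *
 *	IFLA_HSR_SLAVE1            u32, ifindex of slave A
 *	IFLA_HSR_SLAVE2            u32, ifindex of slave B
 *	IFLA_HSR_SUPERVISION_ADDR  6 bytes, supervision multicast address
 *	IFLA_HSR_SEQ_NR            u16, the device's current sequence counter
 */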

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.fill_info	= hsr_fill_info,
};


/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.netnsok = true,
};

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};


/* This is called if, for some node with MAC address addr, we only receive
 * frames over one of the slave interfaces. This indicates an open network
 * ring (i.e. a link has failed somewhere).
 */
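/* A rough sketch of the resulting notification, as seen from userspace: a
 * generic netlink message of the "HSR" family with command HSR_C_RING_ERROR,
 * carrying HSR_A_NODE_ADDR (the MAC address of the affected node) and
 * HSR_A_IFINDEX (the ifindex of the slave port concerned), multicast to the
 * "hsr-network" group. A monitoring process would resolve the family id and
 * group id via the genetlink CTRL family and then subscribe to that group;
 * HSR_C_NODE_DOWN notifications (below) arrive on the same group.
 */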
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}


/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
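/* A sketch of the attribute layout, as implemented below:
 *
 * Request (HSR_C_GET_NODE_STATUS):
 *	HSR_A_IFINDEX    u32, ifindex of the hsr master device
 *	HSR_A_NODE_ADDR  6 bytes, MacAddressA of the node to query
 *
 * Reply (HSR_C_SET_NODE_STATUS, unicast back to the requester):
 *	HSR_A_IFINDEX and HSR_A_NODE_ADDR (copied from the request),
 *	HSR_A_NODE_ADDR_B / HSR_A_ADDR_B_IFINDEX (only if known),
 *	HSR_A_IF1_AGE, HSR_A_IF1_SEQ, HSR_A_IF1_IFINDEX,
 *	HSR_A_IF2_AGE, HSR_A_IF2_SEQ, HSR_A_IF2_IFINDEX
 */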
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
			(unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
			hsr_node_addr_b,
			&addr_b_ifindex,
			&hsr_node_if1_age,
			&hsr_node_if1_seq,
			&hsr_node_if2_age,
			&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
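/* A rough picture of the reply stream, as implemented below: the kernel
 * answers with one or more HSR_C_SET_NODE_LIST messages, each carrying a
 * sequence of HSR_A_NODE_ADDR attributes (one per known node); only the first
 * message also carries HSR_A_IFINDEX. When the node table does not fit into a
 * single message, the -EMSGSIZE handling below flushes the current message to
 * the requester and continues in a new one.
 */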
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}


static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.flags = 0,
		.policy = hsr_genl_policy,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family_with_ops_groups(&hsr_genl_family, hsr_ops,
						  hsr_mcgrps);
	if (rc)
		goto fail_genl_register_family;

	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");