/*
 * net/core/fib_rules.c         Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

static const struct fib_kuid_range fib_kuid_range_unset = {
        KUIDT_INIT(0),
        KUIDT_INIT(~0),
};

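/* True only if the rule uses none of the generic selectors handled here
 * (interface, fwmark, tunnel id, flags, suppressors, uid range); family
 * code combines this with its own checks to recognise a match-all rule.
 */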
bool fib_rule_matchall(const struct fib_rule *rule)
{
        if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
            rule->flags)
                return false;
        if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
                return false;
        if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
            !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

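/* Allocate and append a FR_ACT_TO_TBL rule pointing at @table; used by
 * protocols to seed their rule list before userspace can add anything.
 * IPv4, for instance, installs its initial rules at init time roughly as
 * (illustrative, error handling omitted):
 *
 *      fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *      fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *      fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 */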
int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
{
        struct fib_rule *r;

        r = kzalloc(ops->rule_size, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        refcount_set(&r->refcnt, 1);
        r->action = FR_ACT_TO_TBL;
        r->pref = pref;
        r->table = table;
        r->flags = flags;
        r->proto = RTPROT_KERNEL;
        r->fr_net = ops->fro_net;
        r->uid_range = fib_kuid_range_unset;

        r->suppress_prefixlen = -1;
        r->suppress_ifgroup = -1;

        /* The lock is not required here, the list is unreachable
         * at the moment this function is called */
        list_add_tail(&r->list, &ops->rules_list);
        return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

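/* Pick a preference for a rule added without FRA_PRIORITY: one less than
 * the second rule in the list (the first is normally the pref 0 rule), or
 * 0 when no such slot is available.
 */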
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
        struct list_head *pos;
        struct fib_rule *rule;

        if (!list_empty(&ops->rules_list)) {
                pos = ops->rules_list.next;
                if (pos->next != &ops->rules_list) {
                        rule = list_entry(pos->next, struct fib_rule, list);
                        if (rule->pref)
                                return rule->pref - 1;
                }
        }

        return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid);

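/* Find the per-family ops in this netns and take a module reference;
 * returns NULL if the family is not registered or its module is going
 * away. Drop the reference with rules_ops_put().
 */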
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
        struct fib_rules_ops *ops;

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (ops->family == family) {
                        if (!try_module_get(ops->owner))
                                ops = NULL;
                        rcu_read_unlock();
                        return ops;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
        if (ops)
                module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
        if (ops->flush_cache)
                ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
        int err = -EEXIST;
        struct fib_rules_ops *o;
        struct net *net;

        net = ops->fro_net;

        if (ops->rule_size < sizeof(struct fib_rule))
                return -EINVAL;

        if (ops->match == NULL || ops->configure == NULL ||
            ops->compare == NULL || ops->fill == NULL ||
            ops->action == NULL)
                return -EINVAL;

        spin_lock(&net->rules_mod_lock);
        list_for_each_entry(o, &net->rules_ops, list)
                if (ops->family == o->family)
                        goto errout;

        list_add_tail_rcu(&ops->list, &net->rules_ops);
        err = 0;
errout:
        spin_unlock(&net->rules_mod_lock);

        return err;
}

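/* Duplicate a template ops and attach it to a network namespace. The
 * template must supply match/configure/compare/fill/action callbacks and
 * a rule_size of at least sizeof(struct fib_rule), or __fib_rules_register()
 * rejects it. A protocol typically registers a static template from its
 * per-netns init, roughly like the sketch below (names are illustrative,
 * not taken from any real family):
 *
 *      static const struct fib_rules_ops myproto_rules_ops_template = {
 *              .family         = AF_MYPROTO,
 *              .rule_size      = sizeof(struct myproto_rule),
 *              .addr_size      = sizeof(u32),
 *              .action         = myproto_rule_action,
 *              .match          = myproto_rule_match,
 *              .configure      = myproto_rule_configure,
 *              .compare        = myproto_rule_compare,
 *              .fill           = myproto_rule_fill,
 *              .nlgroup        = RTNLGRP_MYPROTO_RULE,
 *              .policy         = myproto_rule_policy,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      ops = fib_rules_register(&myproto_rules_ops_template, net);
 *
 * The returned ops must eventually be torn down with fib_rules_unregister().
 */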
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
        struct fib_rules_ops *ops;
        int err;

        ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ops->rules_list);
        ops->fro_net = net;

        err = __fib_rules_register(ops);
        if (err) {
                kfree(ops);
                ops = ERR_PTR(err);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
        struct fib_rule *rule, *tmp;

        list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
                list_del_rcu(&rule->list);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
        }
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
        struct net *net = ops->fro_net;

        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
        spin_unlock(&net->rules_mod_lock);

        fib_rules_cleanup_ops(ops);
        kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
        return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
        struct fib_rule_uid_range *in;
        struct fib_kuid_range out;

        in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

        out.start = make_kuid(current_user_ns(), in->start);
        out.end = make_kuid(current_user_ns(), in->end);

        return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
        struct fib_rule_uid_range out = {
                from_kuid_munged(current_user_ns(), range->start),
                from_kuid_munged(current_user_ns(), range->end)
        };

        return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

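/* Apply the generic selectors (iif/oif, fwmark, tunnel id, l3mdev, uid
 * range) and then the family specific ops->match(); FIB_RULE_INVERT
 * negates the final result.
 */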
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags,
                          struct fib_lookup_arg *arg)
{
        int ret = 0;

        if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
                goto out;

        if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
                goto out;

        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;

        if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
                goto out;

        if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
                goto out;

        if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
            uid_gt(fl->flowi_uid, rule->uid_range.end))
                goto out;

        ret = ops->match(rule, fl, flags);
out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

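/* Walk the rule list under RCU in priority order: goto rules jump to their
 * resolved target, NOP rules are skipped, anything else runs ops->action().
 * The first action result other than -EAGAIN ends the walk; the matching
 * rule is returned in arg->rule, with a reference held unless the caller
 * passed FIB_LOOKUP_NOREF. -ESRCH means no rule gave a verdict.
 */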
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
                     int flags, struct fib_lookup_arg *arg)
{
        struct fib_rule *rule;
        int err;

        rcu_read_lock();

        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
                if (!fib_rule_match(rule, ops, fl, flags, arg))
                        continue;

                if (rule->action == FR_ACT_GOTO) {
                        struct fib_rule *target;

                        target = rcu_dereference(rule->ctarget);
                        if (target == NULL) {
                                continue;
                        } else {
                                rule = target;
                                goto jumped;
                        }
                } else if (rule->action == FR_ACT_NOP)
                        continue;
                else
                        err = ops->action(rule, fl, flags, arg);

                if (!err && ops->suppress && ops->suppress(rule, arg))
                        continue;

                if (err != -EAGAIN) {
                        if ((arg->flags & FIB_LOOKUP_NOREF) ||
                            likely(refcount_inc_not_zero(&rule->refcnt))) {
                                arg->rule = rule;
                                goto out;
                        }
                        break;
                }
        }

        err = -ESRCH;
out:
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

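/* Propagate rule add/del events to in-kernel listeners (e.g. switchdev
 * drivers) through the FIB notifier chain. call_fib_rule_notifiers() also
 * bumps the per-ops sequence number reported by fib_rules_seq_read().
 */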
static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
                                  enum fib_event_type event_type,
                                  struct fib_rule *rule, int family)
{
        struct fib_rule_notifier_info info = {
                .info.family = family,
                .rule = rule,
        };

        return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
                                   enum fib_event_type event_type,
                                   struct fib_rule *rule,
                                   struct fib_rules_ops *ops,
                                   struct netlink_ext_ack *extack)
{
        struct fib_rule_notifier_info info = {
                .info.family = ops->family,
                .info.extack = extack,
                .rule = rule,
        };

        ops->fib_rules_seq++;
        return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family)
{
        struct fib_rules_ops *ops;
        struct fib_rule *rule;

        ops = lookup_rules_ops(net, family);
        if (!ops)
                return -EAFNOSUPPORT;
        list_for_each_entry_rcu(rule, &ops->rules_list, list)
                call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule,
                                       family);
        rules_ops_put(ops);

        return 0;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

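/* Read the per-family rule sequence number under RTNL; notifier listeners
 * compare it before and after dumping to detect concurrent rule changes.
 */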
unsigned int fib_rules_seq_read(struct net *net, int family)
{
        unsigned int fib_rules_seq;
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        ops = lookup_rules_ops(net, family);
        if (!ops)
                return 0;
        fib_rules_seq = ops->fib_rules_seq;
        rules_ops_put(ops);

        return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

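/* Reject messages where a non-zero src/dst prefix length exceeds the
 * family's address width or comes without a matching FRA_SRC/FRA_DST
 * attribute of exactly ops->addr_size bytes.
 */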
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
                            struct fib_rules_ops *ops)
{
        int err = -EINVAL;

        if (frh->src_len)
                if (tb[FRA_SRC] == NULL ||
                    frh->src_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_SRC]) != ops->addr_size)
                        goto errout;

        if (frh->dst_len)
                if (tb[FRA_DST] == NULL ||
                    frh->dst_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_DST]) != ops->addr_size)
                        goto errout;

        err = 0;
errout:
        return err;
}

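/* Return 1 if a rule identical in every generic selector and in the
 * family specific ops->compare() sense is already installed; used to
 * honour NLM_F_EXCL in fib_nl_newrule().
 */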
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
                       struct nlattr **tb, struct fib_rule *rule)
{
        struct fib_rule *r;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->action != rule->action)
                        continue;

                if (r->table != rule->table)
                        continue;

                if (r->pref != rule->pref)
                        continue;

                if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
                        continue;

                if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
                        continue;

                if (r->mark != rule->mark)
                        continue;

                if (r->mark_mask != rule->mark_mask)
                        continue;

                if (r->tun_id != rule->tun_id)
                        continue;

                if (r->fr_net != rule->fr_net)
                        continue;

                if (r->l3mdev != rule->l3mdev)
                        continue;

                if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
                    !uid_eq(r->uid_range.end, rule->uid_range.end))
                        continue;

                if (!ops->compare(r, frh, tb))
                        continue;
                return 1;
        }
        return 0;
}

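/* RTM_NEWRULE handler: parse and validate the attributes, build the rule,
 * insert it into the list in priority order, resolve any pending goto
 * targets and notify both the FIB notifier chain and rtnetlink listeners.
 */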
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL, unresolved = 0;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        rule = kzalloc(ops->rule_size, GFP_KERNEL);
        if (rule == NULL) {
                err = -ENOMEM;
                goto errout;
        }
        refcount_set(&rule->refcnt, 1);
        rule->fr_net = net;
        rule->proto = frh->proto;

        rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
                                      : fib_default_rule_pref(ops);

        if (tb[FRA_IIFNAME]) {
                struct net_device *dev;

                rule->iifindex = -1;
                nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->iifname);
                if (dev)
                        rule->iifindex = dev->ifindex;
        }

        if (tb[FRA_OIFNAME]) {
                struct net_device *dev;

                rule->oifindex = -1;
                nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->oifname);
                if (dev)
                        rule->oifindex = dev->ifindex;
        }

        if (tb[FRA_FWMARK]) {
                rule->mark = nla_get_u32(tb[FRA_FWMARK]);
                if (rule->mark)
                        /* compatibility: if the mark value is non-zero all bits
                         * are compared unless a mask is explicitly specified.
                         */
                        rule->mark_mask = 0xFFFFFFFF;
        }

        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

        if (tb[FRA_TUN_ID])
                rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

        err = -EINVAL;
        if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
                rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
                if (rule->l3mdev != 1)
#endif
                        goto errout_free;
        }

        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);
        if (tb[FRA_SUPPRESS_PREFIXLEN])
                rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
        else
                rule->suppress_prefixlen = -1;

        if (tb[FRA_SUPPRESS_IFGROUP])
                rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
        else
                rule->suppress_ifgroup = -1;

        if (tb[FRA_GOTO]) {
                if (rule->action != FR_ACT_GOTO)
                        goto errout_free;

                rule->target = nla_get_u32(tb[FRA_GOTO]);
                /* Backward jumps are prohibited to avoid endless loops */
                if (rule->target <= rule->pref)
                        goto errout_free;

                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
                                RCU_INIT_POINTER(rule->ctarget, r);
                                break;
                        }
                }

                if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;

        if (rule->l3mdev && rule->table)
                goto errout_free;

        if (tb[FRA_UID_RANGE]) {
                if (current_user_ns() != net->user_ns) {
                        err = -EPERM;
                        goto errout_free;
                }

                rule->uid_range = nla_get_kuid_range(tb);

                if (!uid_range_set(&rule->uid_range) ||
                    !uid_lte(rule->uid_range.start, rule->uid_range.end))
                        goto errout_free;
        } else {
                rule->uid_range = fib_kuid_range_unset;
        }

        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
            rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }

        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->pref > rule->pref)
                        break;
                last = r;
        }

        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, &ops->rules_list);

        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
                 * any of them are pointing to this new rule.
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref &&
                            rtnl_dereference(r->ctarget) == NULL) {
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
                        }
                }
        }

        if (rule->action == FR_ACT_GOTO)
                ops->nr_goto_rules++;

        if (unresolved)
                ops->unresolved_rules++;

        if (rule->tun_id)
                ip_tunnel_need_metadata();

        call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops, extack);
        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;

errout_free:
        kfree(rule);
errout:
        rules_ops_put(ops);
        return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);

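/* RTM_DELRULE handler: delete the first rule matching every selector given
 * in the request, re-point goto rules that targeted it and send the
 * corresponding notifications.
 */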
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r;
        struct nlattr *tb[FRA_MAX+1];
        struct fib_kuid_range range;
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        if (tb[FRA_UID_RANGE]) {
                range = nla_get_kuid_range(tb);
                if (!uid_range_set(&range)) {
                        err = -EINVAL;
                        goto errout;
                }
        } else {
                range = fib_kuid_range_unset;
        }

        list_for_each_entry(rule, &ops->rules_list, list) {
                if (frh->proto && (frh->proto != rule->proto))
                        continue;

                if (frh->action && (frh->action != rule->action))
                        continue;

                if (frh_get_table(frh, tb) &&
                    (frh_get_table(frh, tb) != rule->table))
                        continue;

                if (tb[FRA_PRIORITY] &&
                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
                        continue;

                if (tb[FRA_IIFNAME] &&
                    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
                        continue;

                if (tb[FRA_OIFNAME] &&
                    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
                        continue;

                if (tb[FRA_FWMARK] &&
                    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
                        continue;

                if (tb[FRA_FWMASK] &&
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;

                if (tb[FRA_TUN_ID] &&
                    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
                        continue;

                if (tb[FRA_L3MDEV] &&
                    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
                        continue;

                if (uid_range_set(&range) &&
                    (!uid_eq(rule->uid_range.start, range.start) ||
                     !uid_eq(rule->uid_range.end, range.end)))
                        continue;

                if (!ops->compare(rule, frh, tb))
                        continue;

                if (rule->flags & FIB_RULE_PERMANENT) {
                        err = -EPERM;
                        goto errout;
                }

                if (ops->delete) {
                        err = ops->delete(rule);
                        if (err)
                                goto errout;
                }

                if (rule->tun_id)
                        ip_tunnel_unneed_metadata();

                list_del_rcu(&rule->list);

                if (rule->action == FR_ACT_GOTO) {
                        ops->nr_goto_rules--;
                        if (rtnl_dereference(rule->ctarget) == NULL)
                                ops->unresolved_rules--;
                }

                /*
                 * Check if this rule is a target of any goto rules.
                 * If so, re-point them at the next rule with the same
                 * preference, or mark them unresolved. As this walk can
                 * be expensive, it is only performed when goto rules
                 * other than the one being deleted actually exist.
                 */
744 if (ops->nr_goto_rules > 0) {
Serhey Popovychbdaf32c2017-06-16 15:44:47 +0300745 struct fib_rule *n;
746
747 n = list_next_entry(rule, list);
748 if (&n->list == &ops->rules_list || n->pref != rule->pref)
749 n = NULL;
750 list_for_each_entry(r, &ops->rules_list, list) {
751 if (rtnl_dereference(r->ctarget) != rule)
752 continue;
753 rcu_assign_pointer(r->ctarget, n);
754 if (!n)
Thomas Graf0947c9fe2007-03-26 17:14:15 -0700755 ops->unresolved_rules++;
Thomas Graf0947c9fe2007-03-26 17:14:15 -0700756 }
757 }
758
David Ahern6c31e5a2017-10-27 17:37:13 -0700759 call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
760 NULL);
Denis V. Lunev9e3a5482008-01-20 16:46:41 -0800761 notify_rule_change(RTM_DELRULE, rule, ops, nlh,
Eric W. Biederman15e47302012-09-07 20:12:54 +0000762 NETLINK_CB(skb).portid);
Thomas Graf14c0b972006-08-04 03:38:38 -0700763 fib_rule_put(rule);
Thomas Graf73417f62007-03-27 13:56:52 -0700764 flush_route_cache(ops);
Thomas Graf14c0b972006-08-04 03:38:38 -0700765 rules_ops_put(ops);
766 return 0;
767 }
768
769 err = -ENOENT;
770errout:
771 rules_ops_put(ops);
772 return err;
773}
David Ahern96c63fa2016-06-08 10:55:39 -0700774EXPORT_SYMBOL_GPL(fib_nl_delrule);
Thomas Graf14c0b972006-08-04 03:38:38 -0700775
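/* Worst-case netlink message size for one rule, including whatever the
 * family adds through ops->nlmsg_payload().
 */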
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                                         struct fib_rule *rule)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
                         + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
                         + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
                         + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
                         + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4) /* FRA_FWMASK */
                         + nla_total_size_64bit(8) /* FRA_TUN_ID */
                         + nla_total_size(sizeof(struct fib_kuid_range));

        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);

        return payload;
}

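/* Fill one RTM_NEWRULE/RTM_DELRULE message for @rule; family specific
 * attributes are appended by ops->fill().
 */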
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                            u32 pid, u32 seq, int type, int flags,
                            struct fib_rules_ops *ops)
{
        struct nlmsghdr *nlh;
        struct fib_rule_hdr *frh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        frh = nlmsg_data(nlh);
        frh->family = ops->family;
        frh->table = rule->table;
        if (nla_put_u32(skb, FRA_TABLE, rule->table))
                goto nla_put_failure;
        if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
                goto nla_put_failure;
        frh->res1 = 0;
        frh->action = rule->action;
        frh->flags = rule->flags;
        frh->proto = rule->proto;

        if (rule->action == FR_ACT_GOTO &&
            rcu_access_pointer(rule->ctarget) == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;

        if (rule->iifname[0]) {
                if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
                        goto nla_put_failure;
                if (rule->iifindex == -1)
                        frh->flags |= FIB_RULE_IIF_DETACHED;
        }

        if (rule->oifname[0]) {
                if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
                        goto nla_put_failure;
                if (rule->oifindex == -1)
                        frh->flags |= FIB_RULE_OIF_DETACHED;
        }

        if ((rule->pref &&
             nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
            (rule->mark &&
             nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)) ||
            (rule->tun_id &&
             nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
            (rule->l3mdev &&
             nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
            (uid_range_set(&rule->uid_range) &&
             nla_put_uid_range(skb, &rule->uid_range)))
                goto nla_put_failure;

        if (rule->suppress_ifgroup != -1) {
                if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
                        goto nla_put_failure;
        }

        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                      struct fib_rules_ops *ops)
{
        int idx = 0;
        struct fib_rule *rule;
        int err = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
                if (idx < cb->args[1])
                        goto skip;

                err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                       NLM_F_MULTI, ops);
                if (err)
                        break;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[1] = idx;
        rules_ops_put(ops);

        return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rules_ops *ops;
        int idx = 0, family;

        family = rtnl_msg_family(cb->nlh);
        if (family != AF_UNSPEC) {
                /* Protocol specific dump request */
                ops = lookup_rules_ops(net, family);
                if (ops == NULL)
                        return -EAFNOSUPPORT;

                dump_rules(skb, cb, ops);

                return skb->len;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (idx < cb->args[0] || !try_module_get(ops->owner))
                        goto skip;

                if (dump_rules(skb, cb, ops) < 0)
                        break;

                cb->args[1] = 0;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[0] = idx;

        return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid)
{
        struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, ops->nlgroup, err);
}

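/* Rules reference devices by name, so the interface index must be (re)bound
 * when a matching device registers or is renamed and cleared when it goes
 * away; fib_rules_event() below drives this from the netdevice notifier.
 */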
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == -1 &&
                    strcmp(dev->name, rule->iifname) == 0)
                        rule->iifindex = dev->ifindex;
                if (rule->oifindex == -1 &&
                    strcmp(dev->name, rule->oifname) == 0)
                        rule->oifindex = dev->ifindex;
        }
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == dev->ifindex)
                        rule->iifindex = -1;
                if (rule->oifindex == dev->ifindex)
                        rule->oifindex = -1;
        }
}


static int fib_rules_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        switch (event) {
        case NETDEV_REGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        attach_rules(&ops->rules_list, dev);
                break;

        case NETDEV_CHANGENAME:
                list_for_each_entry(ops, &net->rules_ops, list) {
                        detach_rules(&ops->rules_list, dev);
                        attach_rules(&ops->rules_list, dev);
                }
                break;

        case NETDEV_UNREGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        detach_rules(&ops->rules_list, dev);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
        .notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
        INIT_LIST_HEAD(&net->rules_ops);
        spin_lock_init(&net->rules_mod_lock);
        return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
        WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
        .init = fib_rules_net_init,
        .exit = fib_rules_net_exit,
        .async = true,
};

static int __init fib_rules_init(void)
{
        int err;
        rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);

        err = register_pernet_subsys(&fib_rules_net_ops);
        if (err < 0)
                goto fail;

        err = register_netdevice_notifier(&fib_rules_notifier);
        if (err < 0)
                goto fail_unregister;

        return 0;

fail_unregister:
        unregister_pernet_subsys(&fib_rules_net_ops);
fail:
        rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
        rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
        rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
        return err;
}

subsys_initcall(fib_rules_init);