Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 1 | #include <linux/rtnetlink.h> |
| 2 | #include <linux/notifier.h> |
| 3 | #include <linux/rcupdate.h> |
| 4 | #include <linux/kernel.h> |
Ido Schimmel | 864150d | 2017-09-01 12:15:17 +0300 | [diff] [blame] | 5 | #include <linux/module.h> |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 6 | #include <linux/init.h> |
| 7 | #include <net/net_namespace.h> |
| 8 | #include <net/fib_notifier.h> |
| 9 | |
/* Global chain of FIB event listeners. An atomic notifier head is used so
 * events can be published without sleeping; all call_fib_notifiers() events
 * are fanned out through this single chain. */
static ATOMIC_NOTIFIER_HEAD(fib_chain);
| 11 | |
| 12 | int call_fib_notifier(struct notifier_block *nb, struct net *net, |
| 13 | enum fib_event_type event_type, |
| 14 | struct fib_notifier_info *info) |
| 15 | { |
David Ahern | c30d935 | 2018-03-27 18:21:55 -0700 | [diff] [blame] | 16 | int err; |
| 17 | |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 18 | info->net = net; |
David Ahern | c30d935 | 2018-03-27 18:21:55 -0700 | [diff] [blame] | 19 | err = nb->notifier_call(nb, event_type, info); |
| 20 | return notifier_to_errno(err); |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 21 | } |
| 22 | EXPORT_SYMBOL(call_fib_notifier); |
| 23 | |
| 24 | int call_fib_notifiers(struct net *net, enum fib_event_type event_type, |
| 25 | struct fib_notifier_info *info) |
| 26 | { |
David Ahern | c30d935 | 2018-03-27 18:21:55 -0700 | [diff] [blame] | 27 | int err; |
| 28 | |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 29 | info->net = net; |
David Ahern | c30d935 | 2018-03-27 18:21:55 -0700 | [diff] [blame] | 30 | err = atomic_notifier_call_chain(&fib_chain, event_type, info); |
| 31 | return notifier_to_errno(err); |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 32 | } |
| 33 | EXPORT_SYMBOL(call_fib_notifiers); |
| 34 | |
/* Sum the FIB sequence counters of every registered fib_notifier_ops across
 * all network namespaces. The total is used as a change-detection stamp: if
 * it differs before and after a dump, the FIB was modified concurrently.
 *
 * Lock order here is deliberate: RTNL first, then net_rwsem to stabilize
 * the namespace list for for_each_net(), then RCU for the per-net ops list.
 */
static unsigned int fib_seq_sum(void)
{
	struct fib_notifier_ops *ops;
	unsigned int fib_seq = 0;
	struct net *net;

	rtnl_lock();
	down_read(&net_rwsem);
	for_each_net(net) {
		rcu_read_lock();
		list_for_each_entry_rcu(ops, &net->fib_notifier_ops, list) {
			/* Skip ops whose owning module is being unloaded. */
			if (!try_module_get(ops->owner))
				continue;
			fib_seq += ops->fib_seq_read(net);
			module_put(ops->owner);
		}
		rcu_read_unlock();
	}
	up_read(&net_rwsem);
	rtnl_unlock();

	return fib_seq;
}
| 58 | |
| 59 | static int fib_net_dump(struct net *net, struct notifier_block *nb) |
| 60 | { |
| 61 | struct fib_notifier_ops *ops; |
| 62 | |
| 63 | list_for_each_entry_rcu(ops, &net->fib_notifier_ops, list) { |
Ido Schimmel | 864150d | 2017-09-01 12:15:17 +0300 | [diff] [blame] | 64 | int err; |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 65 | |
Ido Schimmel | 864150d | 2017-09-01 12:15:17 +0300 | [diff] [blame] | 66 | if (!try_module_get(ops->owner)) |
| 67 | continue; |
| 68 | err = ops->fib_dump(net, nb); |
| 69 | module_put(ops->owner); |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 70 | if (err) |
| 71 | return err; |
| 72 | } |
| 73 | |
| 74 | return 0; |
| 75 | } |
| 76 | |
/* Atomically commit a notifier registration only if the FIB did not change
 * while it was being brought up to date via dumps.
 *
 * The block is registered *first*, then the sequence sum is re-read. This
 * ordering closes the race window: any FIB change after registration is
 * delivered through the chain, and any change before it bumps the sequence
 * sum and forces a retry. On inconsistency the block is unregistered again
 * and the caller-supplied @cb (if any) lets the caller discard the partial
 * state it accumulated from the dump.
 *
 * Returns true if the registration stuck, false if the caller must retry.
 */
static bool fib_dump_is_consistent(struct notifier_block *nb,
				   void (*cb)(struct notifier_block *nb),
				   unsigned int fib_seq)
{
	atomic_notifier_chain_register(&fib_chain, nb);
	if (fib_seq == fib_seq_sum())
		return true;
	atomic_notifier_chain_unregister(&fib_chain, nb);
	if (cb)
		cb(nb);
	return false;
}
| 89 | |
/* Bounded number of dump-and-recheck attempts before giving up with -EBUSY. */
#define FIB_DUMP_MAX_RETRIES 5
/**
 * register_fib_notifier - register @nb for FIB events, replaying current state
 * @nb: notifier block to register
 * @cb: optional cleanup callback invoked when a dump attempt is discarded
 *      because the FIB changed underneath it
 *
 * Snapshots the FIB sequence sum, dumps the FIB of every namespace to @nb,
 * then registers @nb only if the sum is unchanged (see
 * fib_dump_is_consistent()). Retries up to FIB_DUMP_MAX_RETRIES times if the
 * FIB keeps changing.
 *
 * Returns 0 on success, -EBUSY if a consistent dump could not be obtained,
 * or a fib_net_dump() error.
 */
int register_fib_notifier(struct notifier_block *nb,
			  void (*cb)(struct notifier_block *nb))
{
	int retries = 0;
	int err;

	do {
		unsigned int fib_seq = fib_seq_sum();
		struct net *net;

		/* RCU protects both the netns iteration and each per-net
		 * ops list walked inside fib_net_dump().
		 */
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = fib_net_dump(net, nb);
			if (err)
				goto err_fib_net_dump;
		}
		rcu_read_unlock();

		if (fib_dump_is_consistent(nb, cb, fib_seq))
			return 0;
	} while (++retries < FIB_DUMP_MAX_RETRIES);

	return -EBUSY;

err_fib_net_dump:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(register_fib_notifier);
| 120 | |
| 121 | int unregister_fib_notifier(struct notifier_block *nb) |
| 122 | { |
| 123 | return atomic_notifier_chain_unregister(&fib_chain, nb); |
| 124 | } |
| 125 | EXPORT_SYMBOL(unregister_fib_notifier); |
| 126 | |
| 127 | static int __fib_notifier_ops_register(struct fib_notifier_ops *ops, |
| 128 | struct net *net) |
| 129 | { |
| 130 | struct fib_notifier_ops *o; |
| 131 | |
| 132 | list_for_each_entry(o, &net->fib_notifier_ops, list) |
| 133 | if (ops->family == o->family) |
| 134 | return -EEXIST; |
| 135 | list_add_tail_rcu(&ops->list, &net->fib_notifier_ops); |
| 136 | return 0; |
| 137 | } |
| 138 | |
| 139 | struct fib_notifier_ops * |
| 140 | fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net) |
| 141 | { |
| 142 | struct fib_notifier_ops *ops; |
| 143 | int err; |
| 144 | |
| 145 | ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); |
| 146 | if (!ops) |
| 147 | return ERR_PTR(-ENOMEM); |
| 148 | |
| 149 | err = __fib_notifier_ops_register(ops, net); |
| 150 | if (err) |
| 151 | goto err_register; |
| 152 | |
| 153 | return ops; |
| 154 | |
| 155 | err_register: |
| 156 | kfree(ops); |
| 157 | return ERR_PTR(err); |
| 158 | } |
| 159 | EXPORT_SYMBOL(fib_notifier_ops_register); |
| 160 | |
/**
 * fib_notifier_ops_unregister - unlink @ops and free it after an RCU grace
 *                               period
 * @ops: ops previously returned by fib_notifier_ops_register()
 *
 * kfree_rcu() defers the free so concurrent RCU readers walking the per-net
 * ops list (fib_seq_sum(), fib_net_dump()) never see freed memory.
 */
void fib_notifier_ops_unregister(struct fib_notifier_ops *ops)
{
	list_del_rcu(&ops->list);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL(fib_notifier_ops_unregister);
| 167 | |
| 168 | static int __net_init fib_notifier_net_init(struct net *net) |
| 169 | { |
| 170 | INIT_LIST_HEAD(&net->fib_notifier_ops); |
| 171 | return 0; |
| 172 | } |
| 173 | |
Vasily Averin | 0b6f595 | 2017-11-12 22:29:33 +0300 | [diff] [blame] | 174 | static void __net_exit fib_notifier_net_exit(struct net *net) |
| 175 | { |
| 176 | WARN_ON_ONCE(!list_empty(&net->fib_notifier_ops)); |
| 177 | } |
| 178 | |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 179 | static struct pernet_operations fib_notifier_net_ops = { |
| 180 | .init = fib_notifier_net_init, |
Vasily Averin | 0b6f595 | 2017-11-12 22:29:33 +0300 | [diff] [blame] | 181 | .exit = fib_notifier_net_exit, |
Ido Schimmel | 04b1d4e | 2017-08-03 13:28:11 +0200 | [diff] [blame] | 182 | }; |
| 183 | |
/* Module init: register the per-netns hooks. Runs at subsys_initcall time
 * so the infrastructure exists before any FIB users register ops. */
static int __init fib_notifier_init(void)
{
	return register_pernet_subsys(&fib_notifier_net_ops);
}

subsys_initcall(fib_notifier_init);