/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *   & Swedish University of Agricultural Sciences.
 *
 * Jens Laas <jens.laas@data.slu.se> Swedish University of
 *   Agricultural Sciences.
 *
 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define T_TNODE 0
#define T_LEAF  1
#define NODE_TYPE_MASK 0x1UL
#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)

#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)

struct rt_trie_node {
	unsigned long parent;
	t_key key;
};

struct leaf {
	unsigned long parent;
	t_key key;
	struct hlist_head list;
	struct rcu_head rcu;
};

struct leaf_info {
	struct hlist_node hlist;
	int plen;
	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
	struct list_head falh;
	struct rcu_head rcu;
};

struct tnode {
	unsigned long parent;
	t_key key;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned int full_children;	/* KEYLENGTH bits needed */
	unsigned int empty_children;	/* KEYLENGTH bits needed */
	union {
		struct rcu_head rcu;
		struct work_struct work;
		struct tnode *tnode_free;
	};
	struct rt_trie_node __rcu *child[0];
};

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct rt_trie_node __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats stats;
#endif
};

static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
				  int wasfull);
static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct tnode *tnode_free_head;
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;
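
/*
 * Rough illustration (assuming a typical 4 KiB PAGE_SIZE, which is
 * architecture dependent and not stated here): tnode_free_flush() below
 * would then call synchronize_rcu() after roughly 128 * 4 KiB = 512 KiB
 * of tnode memory has been queued for freeing.
 */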

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/*
 * caller must hold RTNL
 */
static inline struct tnode *node_parent(const struct rt_trie_node *node)
{
	unsigned long parent;

	parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());

	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}

/*
 * caller must hold RCU read lock or RTNL
 */
static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
{
	unsigned long parent;

	parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
							   lockdep_rtnl_is_held());

	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}

/* Same as rcu_assign_pointer
 * but that macro() assumes that value is a pointer.
 */
static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
{
	smp_wmb();
	node->parent = (unsigned long)ptr | NODE_TYPE(node);
}

/*
 * caller must hold RTNL
 */
static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= 1U << tn->bits);

	return rtnl_dereference(tn->child[i]);
}

/*
 * caller must hold RCU read lock or RTNL
 */
static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= 1U << tn->bits);

	return rcu_dereference_rtnl(tn->child[i]);
}

static inline int tnode_child_length(const struct tnode *tn)
{
	return 1 << tn->bits;
}

static inline t_key mask_pfx(t_key k, unsigned int l)
{
	return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
}

static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
{
	if (offset < KEYLENGTH)
		return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
	else
		return 0;
}

static inline int tkey_equals(t_key a, t_key b)
{
	return a == b;
}

static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
{
	if (bits == 0 || offset >= KEYLENGTH)
		return 1;
	bits = bits > KEYLENGTH ? KEYLENGTH : bits;
	return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
}

static inline int tkey_mismatch(t_key a, int offset, t_key b)
{
	t_key diff = a ^ b;
	int i = offset;

	if (!diff)
		return 0;
	while ((diff << i) >> (KEYLENGTH-1) == 0)
		i++;
	return i;
}

/*
  To understand this stuff, an understanding of keys and all their bits is
  necessary. Every node in the trie has a key associated with it, but not
  all of the bits in that key are significant.

  Consider a node 'n' and its parent 'tp'.

  If n is a leaf, every bit in its key is significant. Its presence is
  necessitated by path compression, since during a tree traversal (when
  searching for a leaf - unless we are doing an insertion) we will completely
  ignore all skipped bits we encounter. Thus we need to verify, at the end of
  a potentially successful search, that we have indeed been walking the
  correct key path.

  Note that we can never "miss" the correct key in the tree if present by
  following the wrong path. Path compression ensures that segments of the key
  that are the same for all keys with a given prefix are skipped, but the
  skipped part *is* identical for each node in the subtrie below the skipped
  bit! trie_insert() in this implementation takes care of that - note the
  call to tkey_sub_equals() in trie_insert().

  if n is an internal node - a 'tnode' here, the various parts of its key
  have many different meanings.

  Example:
  _________________________________________________________________
  | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
  -----------------------------------------------------------------
    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15

  _________________________________________________________________
  | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
  -----------------------------------------------------------------
   16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31

  tp->pos = 7
  tp->bits = 3
  n->pos = 15
  n->bits = 4

  First, let's just ignore the bits that come before the parent tp, that is
  the bits from 0 to (tp->pos-1). They are *known* but at this point we do
  not use them for anything.

  The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
  index into the parent's child array. That is, they will be used to find
  'n' among tp's children.

  The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
  for the node n.

  All the bits we have seen so far are significant to the node n. The rest
  of the bits are really not needed or indeed known in n->key.

  The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  n's child array, and will of course be different for each child.


  The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
  at this point.

*/

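/*
 * Worked example for the layout above, using an arbitrary sample key
 * (illustrative values only, not taken from the original text): with
 * tp->pos = 7 and tp->bits = 3, the index of n in tp's child array for
 * key 0xC0A80000 (192.168.0.0) is
 *
 *	tkey_extract_bits(0xC0A80000, 7, 3)
 *		= ((t_key)(0xC0A80000 << 7)) >> (32 - 3)
 *		= 0x54000000 >> 29
 *		= 2,
 *
 * i.e. the three "N" bits (bits 7..9 counted from the most significant
 * bit) of the key.
 */
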
static inline void check_tnode(const struct tnode *tn)
{
	WARN_ON(tn && tn->pos+tn->bits > 32);
}

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

static void __leaf_free_rcu(struct rcu_head *head)
{
	struct leaf *l = container_of(head, struct leaf, rcu);
	kmem_cache_free(trie_leaf_kmem, l);
}

static inline void free_leaf(struct leaf *l)
{
	call_rcu_bh(&l->rcu, __leaf_free_rcu);
}

static inline void free_leaf_info(struct leaf_info *leaf)
{
	kfree_rcu(leaf, rcu);
}

static struct tnode *tnode_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static void __tnode_vfree(struct work_struct *arg)
{
	struct tnode *tn = container_of(arg, struct tnode, work);
	vfree(tn);
}

static void __tnode_free_rcu(struct rcu_head *head)
{
	struct tnode *tn = container_of(head, struct tnode, rcu);
	size_t size = sizeof(struct tnode) +
		      (sizeof(struct rt_trie_node *) << tn->bits);

	if (size <= PAGE_SIZE)
		kfree(tn);
	else {
		INIT_WORK(&tn->work, __tnode_vfree);
		schedule_work(&tn->work);
	}
}

static inline void tnode_free(struct tnode *tn)
{
	if (IS_LEAF(tn))
		free_leaf((struct leaf *) tn);
	else
		call_rcu(&tn->rcu, __tnode_free_rcu);
}

static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->tnode_free = tnode_free_head;
	tnode_free_head = tn;
	tnode_free_size += sizeof(struct tnode) +
			   (sizeof(struct rt_trie_node *) << tn->bits);
}

static void tnode_free_flush(void)
{
	struct tnode *tn;

	while ((tn = tnode_free_head)) {
		tnode_free_head = tn->tnode_free;
		tn->tnode_free = NULL;
		tnode_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}

static struct leaf *leaf_new(void)
{
	struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (l) {
		l->parent = T_LEAF;
		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		li->mask_plen = ntohl(inet_make_mask(plen));
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}

static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
	struct tnode *tn = tnode_alloc(sz);

	if (tn) {
		tn->parent = T_TNODE;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = key;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
		 sizeof(struct rt_trie_node) << bits);
	return tn;
}

/*
 * Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */

static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
{
	if (n == NULL || IS_LEAF(n))
		return 0;

	return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}

static inline void put_child(struct trie *t, struct tnode *tn, int i,
			     struct rt_trie_node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}

 /*
  * Add a child at position i overwriting the old value.
  * Update the value of full_children and empty_children.
  */

static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
				  int wasfull)
{
	struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n)
		node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}

#define MAX_WORK 10
static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
{
	int i;
	struct tnode *old_tn;
	int inflate_threshold_use;
	int halve_threshold_use;
	int max_work;

	if (!tn)
		return NULL;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children == tnode_child_length(tn)) {
		tnode_free_safe(tn);
		return NULL;
	}
	/* One child */
	if (tn->empty_children == tnode_child_length(tn) - 1)
		goto one_child;
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */

	/*
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it with
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *      new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *
	 * ...and so on, tho it would mess up the while () loop.
	 *
	 * anyway,
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 * inflate_threshold
	 *
	 * avoid a division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 * inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >=
	 *      inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *    tn->empty_children) >= inflate_threshold *
	 *    tnode_child_length(tn)
	 *
	 */

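	/*
	 * Numerical sketch of the final inequality (made-up numbers, for
	 * illustration only): a tnode with bits = 4 has
	 * tnode_child_length(tn) = 16 slots.  Say 12 are non-empty
	 * (empty_children = 4) and 4 of those are full.  Then
	 *
	 *	50 * (4 + 16 - 4) = 800 >= 50 * 16 = 800
	 *
	 * so the node is inflated: after doubling, 8 + 2*4 = 16 of the 32
	 * slots would be non-empty, a fill factor of exactly 50%.  With
	 * only 3 full children the left-hand side drops to 750 < 800 and
	 * the node is left alone.
	 */
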
	check_tnode(tn);

	/* Keep root node larger  */

	if (!node_parent((struct rt_trie_node *)tn)) {
		inflate_threshold_use = inflate_threshold_root;
		halve_threshold_use = halve_threshold_root;
	} else {
		inflate_threshold_use = inflate_threshold;
		halve_threshold_use = halve_threshold;
	}

	max_work = MAX_WORK;
	while ((tn->full_children > 0 &&  max_work-- &&
		50 * (tn->full_children + tnode_child_length(tn)
		      - tn->empty_children)
		>= inflate_threshold_use * tnode_child_length(tn))) {

		old_tn = tn;
		tn = inflate(t, tn);

		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.resize_node_skipped++;
#endif
			break;
		}
	}

	check_tnode(tn);

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return (struct rt_trie_node *) tn;

	/*
	 * Halve as long as the number of empty children in this
	 * node is above threshold.
	 */

	max_work = MAX_WORK;
	while (tn->bits > 1 &&  max_work-- &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       halve_threshold_use * tnode_child_length(tn)) {

		old_tn = tn;
		tn = halve(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.resize_node_skipped++;
#endif
			break;
		}
	}


	/* Only one child remains */
	if (tn->empty_children == tnode_child_length(tn) - 1) {
one_child:
		for (i = 0; i < tnode_child_length(tn); i++) {
			struct rt_trie_node *n;

			n = rtnl_dereference(tn->child[i]);
			if (!n)
				continue;

			/* compress one level */

			node_set_parent(n, NULL);
			tnode_free_safe(tn);
			return n;
		}
	}
	return (struct rt_trie_node *) tn;
}


static void tnode_clean_free(struct tnode *tn)
{
	int i;
	struct tnode *tofree;

	for (i = 0; i < tnode_child_length(tn); i++) {
		tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
		if (tofree)
			tnode_free(tofree);
	}
	tnode_free(tn);
}

static struct tnode *inflate(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	int olen = tnode_child_length(tn);
	int i;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and  inflate
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i++) {
		struct tnode *inode;

		inode = (struct tnode *) tnode_get_child(oldtnode, i);
		if (inode &&
		    IS_TNODE(inode) &&
		    inode->pos == oldtnode->pos + oldtnode->bits &&
		    inode->bits > 1) {
			struct tnode *left, *right;
			t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;

			left = tnode_new(inode->key&(~m), inode->pos + 1,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key|m, inode->pos + 1,
					  inode->bits - 1);

			if (!right) {
				tnode_free(left);
				goto nomem;
			}

			put_child(t, tn, 2*i, (struct rt_trie_node *) left);
			put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode;
		struct rt_trie_node *node = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		int size, j;

		/* An empty child */
		if (node == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */

		if (IS_LEAF(node) || ((struct tnode *) node)->pos >
		   tn->pos + tn->bits - 1) {
			if (tkey_extract_bits(node->key,
					      oldtnode->pos + oldtnode->bits,
					      1) == 0)
				put_child(t, tn, 2*i, node);
			else
				put_child(t, tn, 2*i+1, node);
			continue;
		}

		/* An internal node with two children */
		inode = (struct tnode *) node;

		if (inode->bits == 1) {
			put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
			put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */
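		/* For instance (illustrative numbers only): with
		 * KEYLENGTH == 32 and inode->pos == 28,
		 *	m = ~0U << (KEYLENGTH - 1) >> inode->pos
		 *	  = 0x80000000 >> 28 = 0x00000008,
		 * a single bit at position 28 counted from the MSB;
		 * left's key is then inode->key & ~m and right's key
		 * is inode->key | m.
		 */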

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		left = (struct tnode *) tnode_get_child(tn, 2*i);
		put_child(t, tn, 2*i, NULL);

		BUG_ON(!left);

		right = (struct tnode *) tnode_get_child(tn, 2*i+1);
		put_child(t, tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(t, left, j, rtnl_dereference(inode->child[j]));
			put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
		}
		put_child(t, tn, 2*i, resize(t, left));
		put_child(t, tn, 2*i+1, resize(t, right));

		tnode_free_safe(inode);
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

static struct tnode *halve(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	struct rt_trie_node *left, *right;
	int i;
	int olen = tnode_child_length(tn);

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, tn->pos + tn->bits, 1);

			if (!newn)
				goto nomem;

			put_child(t, tn, i/2, (struct rt_trie_node *)newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(t, tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(t, tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
		put_child(t, tn, i/2, NULL);
		put_child(t, newBinNode, 0, left);
		put_child(t, newBinNode, 1, right);
		put_child(t, tn, i/2, resize(t, newBinNode));
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

/* readside must use rcu_read_lock currently dump routines
 via get_fa_head and dump */

static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct hlist_node *node;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, node, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct leaf *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;
	struct hlist_node *node;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, node, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_after_rcu(&last->hlist, &new->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}

/* rcu_read_lock needs to be held by caller from readside */

static struct leaf *
fib_find_node(struct trie *t, u32 key)
{
	int pos;
	struct tnode *tn;
	struct rt_trie_node *n;

	pos = 0;
	n = rcu_dereference_rtnl(t->trie);

	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			pos = tn->pos + tn->bits;
			n = tnode_get_child_rcu(tn,
						tkey_extract_bits(key,
								  tn->pos,
								  tn->bits));
		} else
			break;
	}
	/* Case we have found a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
		return (struct leaf *)n;

	return NULL;
}

static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp;

	key = tn->key;

	while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = (struct tnode *) resize(t, (struct tnode *)tn);

		tnode_put_child_reorg((struct tnode *)tp, cindex,
				      (struct rt_trie_node *)tn, wasfull);

		tp = node_parent((struct rt_trie_node *) tn);
		if (!tp)
			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);

		tnode_free_flush();
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = (struct tnode *)resize(t, (struct tnode *)tn);

	rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
	tnode_free_flush();
}

/* only used from updater-side */

static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	int pos, newpos;
	struct tnode *tp = NULL, *tn = NULL;
	struct rt_trie_node *n;
	struct leaf *l;
	int missbit;
	struct list_head *fa_head = NULL;
	struct leaf_info *li;
	t_key cindex;

	pos = 0;
	n = rtnl_dereference(t->trie);

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 * If we point to a T_TNODE, check if it matches our key. Note that
	 * a T_TNODE might be skipping any number of bits - its 'pos' need
	 * not be the parent's 'pos'+'bits'!
	 *
	 * If it does match the current key, get pos/bits from it, extract
	 * the index from our key, push the T_TNODE and walk the tree.
	 *
	 * If it doesn't, we have to replace it with a new T_TNODE.
	 *
	 * If we point to a T_LEAF, it might or might not have the same key
	 * as we do. If it does, just change the value, update the T_LEAF's
	 * value, and return it.
	 * If it doesn't, we need to replace it with a T_TNODE.
	 */

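	/*
	 * Small worked example (made-up addresses, for illustration only):
	 * if the trie holds a single leaf with key 0xC0A80000 (192.168.0.0)
	 * and we insert key 0xC0A80100 (192.168.1.0), the walk below stops
	 * at that leaf (case 3).  tkey_mismatch(0xC0A80100, 0, 0xC0A80000)
	 * returns 23, the first differing bit from the MSB, so a new tnode
	 * with pos = 23, bits = 1 is created; missbit is 1 for the new key,
	 * so the new leaf lands in child[1] and the old leaf in child[0].
	 */
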
	while (n != NULL &&  NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			tp = tn;
			pos = tn->pos + tn->bits;
			n = tnode_get_child(tn,
					    tkey_extract_bits(key,
							      tn->pos,
							      tn->bits));

			BUG_ON(n && node_parent(n) != tn);
		} else
			break;
	}

	/*
	 * n  ----> NULL, LEAF or TNODE
	 *
	 * tp is n's (parent) ----> NULL or TNODE
	 */

	BUG_ON(tp && IS_LEAF(tp));

	/* Case 1: n is a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
		l = (struct leaf *) n;
		li = leaf_info_new(plen);

		if (!li)
			return NULL;

		fa_head = &li->falh;
		insert_leaf_info(&l->list, li);
		goto done;
	}
	l = leaf_new();

	if (!l)
		return NULL;

	l->key = key;
	li = leaf_info_new(plen);

	if (!li) {
		free_leaf(l);
		return NULL;
	}

	fa_head = &li->falh;
	insert_leaf_info(&l->list, li);

	if (t->trie && n == NULL) {
		/* Case 2: n is NULL, and will just insert a new leaf */

		node_set_parent((struct rt_trie_node *)l, tp);

		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
	} else {
		/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001131 /*
1132 * Add a new tnode here
Robert Olsson19baf832005-06-21 12:43:18 -07001133		 * the first tnode needs some special handling
1134 */
1135
1136 if (tp)
Olof Johansson91b9a272005-08-09 20:24:39 -07001137 pos = tp->pos+tp->bits;
Robert Olsson19baf832005-06-21 12:43:18 -07001138 else
Olof Johansson91b9a272005-08-09 20:24:39 -07001139 pos = 0;
1140
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001141 if (n) {
Robert Olsson19baf832005-06-21 12:43:18 -07001142 newpos = tkey_mismatch(key, pos, n->key);
1143 tn = tnode_new(n->key, newpos, 1);
Olof Johansson91b9a272005-08-09 20:24:39 -07001144 } else {
Robert Olsson19baf832005-06-21 12:43:18 -07001145 newpos = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001146 tn = tnode_new(key, newpos, 1); /* First tnode */
Robert Olsson19baf832005-06-21 12:43:18 -07001147 }
Robert Olsson19baf832005-06-21 12:43:18 -07001148
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001149 if (!tn) {
Robert Olssonf835e472005-06-28 15:00:39 -07001150 free_leaf_info(li);
Stephen Hemminger387a5482008-04-10 03:47:34 -07001151 free_leaf(l);
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001152 return NULL;
Olof Johansson91b9a272005-08-09 20:24:39 -07001153 }
1154
David S. Millerb299e4f2011-02-02 20:48:10 -08001155 node_set_parent((struct rt_trie_node *)tn, tp);
Robert Olsson19baf832005-06-21 12:43:18 -07001156
Olof Johansson91b9a272005-08-09 20:24:39 -07001157 missbit = tkey_extract_bits(key, newpos, 1);
David S. Millerb299e4f2011-02-02 20:48:10 -08001158 put_child(t, tn, missbit, (struct rt_trie_node *)l);
Robert Olsson19baf832005-06-21 12:43:18 -07001159 put_child(t, tn, 1-missbit, n);
1160
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001161 if (tp) {
Robert Olsson19baf832005-06-21 12:43:18 -07001162 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001163 put_child(t, (struct tnode *)tp, cindex,
David S. Millerb299e4f2011-02-02 20:48:10 -08001164 (struct rt_trie_node *)tn);
Olof Johansson91b9a272005-08-09 20:24:39 -07001165 } else {
David S. Millerb299e4f2011-02-02 20:48:10 -08001166 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
Robert Olsson19baf832005-06-21 12:43:18 -07001167 tp = tn;
1168 }
1169 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001170
1171 if (tp && tp->pos + tp->bits > 32)
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001172 pr_warning("fib_trie"
1173 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1174 tp, tp->pos, tp->bits, key, plen);
Olof Johansson91b9a272005-08-09 20:24:39 -07001175
Robert Olsson19baf832005-06-21 12:43:18 -07001176 /* Rebalance the trie */
Robert Olsson2373ce12005-08-25 13:01:29 -07001177
Jarek Poplawski7b855762009-06-18 00:28:51 -07001178 trie_rebalance(t, tp);
Robert Olssonf835e472005-06-28 15:00:39 -07001179done:
Robert Olsson19baf832005-06-21 12:43:18 -07001180 return fa_head;
1181}
1182
Robert Olssond562f1f2007-03-26 14:22:22 -07001183/*
1184 * Caller must hold RTNL.
1185 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001186int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001187{
1188 struct trie *t = (struct trie *) tb->tb_data;
1189 struct fib_alias *fa, *new_fa;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001190 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001191 struct fib_info *fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001192 int plen = cfg->fc_dst_len;
1193 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001194 u32 key, mask;
1195 int err;
1196 struct leaf *l;
1197
1198 if (plen > 32)
1199 return -EINVAL;
1200
Thomas Graf4e902c52006-08-17 18:14:52 -07001201 key = ntohl(cfg->fc_dst);
Robert Olsson19baf832005-06-21 12:43:18 -07001202
Patrick McHardy2dfe55b2006-08-10 23:08:33 -07001203 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001204
Olof Johansson91b9a272005-08-09 20:24:39 -07001205 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001206
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001207 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001208 return -EINVAL;
1209
1210 key = key & mask;
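	/* Example (illustration only): inserting 10.1.2.0/24 gives
	 * mask = 0xffffff00 and key = 0x0a010200, so key & ~mask is zero
	 * and the prefix is accepted.  A malformed 10.1.2.3/24 would give
	 * key & ~mask = 0x03 and be rejected with -EINVAL above.
	 */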
1211
Thomas Graf4e902c52006-08-17 18:14:52 -07001212 fi = fib_create_info(cfg);
1213 if (IS_ERR(fi)) {
1214 err = PTR_ERR(fi);
Robert Olsson19baf832005-06-21 12:43:18 -07001215 goto err;
Thomas Graf4e902c52006-08-17 18:14:52 -07001216 }
Robert Olsson19baf832005-06-21 12:43:18 -07001217
1218 l = fib_find_node(t, key);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001219 fa = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001220
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001221 if (l) {
Robert Olsson19baf832005-06-21 12:43:18 -07001222 fa_head = get_fa_head(l, plen);
1223 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1224 }
1225
1226	/* Now fa, if non-NULL, points to the first fib alias
1227	 * with the same keys [prefix,tos,priority], if such a key already
1228	 * exists, or to the node before which we will insert the new one.
1229	 *
1230	 * If fa is NULL, we will need to allocate a new one and
1231	 * link it into the leaf's alias list.
1232	 *
1233	 * If l is NULL, no leaf matched the destination key
1234	 * and we need to allocate a new one of those as well.
1235 */
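	/* Netlink flag handling as implemented below (summary of this
	 * function only): NLM_F_EXCL fails with -EEXIST when an alias with
	 * the same prefix/tos/priority already exists, NLM_F_REPLACE swaps
	 * the matching alias in place, NLM_F_APPEND inserts the new alias
	 * after the existing matches rather than before them, and without
	 * NLM_F_CREATE a previously unknown route fails with -ENOENT.
	 */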
1236
Julian Anastasov936f6f82008-01-28 21:18:06 -08001237 if (fa && fa->fa_tos == tos &&
1238 fa->fa_info->fib_priority == fi->fib_priority) {
1239 struct fib_alias *fa_first, *fa_match;
Robert Olsson19baf832005-06-21 12:43:18 -07001240
1241 err = -EEXIST;
Thomas Graf4e902c52006-08-17 18:14:52 -07001242 if (cfg->fc_nlflags & NLM_F_EXCL)
Robert Olsson19baf832005-06-21 12:43:18 -07001243 goto out;
1244
Julian Anastasov936f6f82008-01-28 21:18:06 -08001245 /* We have 2 goals:
1246 * 1. Find exact match for type, scope, fib_info to avoid
1247 * duplicate routes
1248 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1249 */
1250 fa_match = NULL;
1251 fa_first = fa;
1252 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1253 list_for_each_entry_continue(fa, fa_head, fa_list) {
1254 if (fa->fa_tos != tos)
1255 break;
1256 if (fa->fa_info->fib_priority != fi->fib_priority)
1257 break;
1258 if (fa->fa_type == cfg->fc_type &&
Julian Anastasov936f6f82008-01-28 21:18:06 -08001259 fa->fa_info == fi) {
1260 fa_match = fa;
1261 break;
1262 }
1263 }
1264
Thomas Graf4e902c52006-08-17 18:14:52 -07001265 if (cfg->fc_nlflags & NLM_F_REPLACE) {
Robert Olsson19baf832005-06-21 12:43:18 -07001266 struct fib_info *fi_drop;
1267 u8 state;
1268
Julian Anastasov936f6f82008-01-28 21:18:06 -08001269 fa = fa_first;
1270 if (fa_match) {
1271 if (fa == fa_match)
1272 err = 0;
Joonwoo Park67250332008-01-18 03:45:18 -08001273 goto out;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001274 }
Robert Olsson2373ce12005-08-25 13:01:29 -07001275 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001276 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -07001277 if (new_fa == NULL)
1278 goto out;
Robert Olsson19baf832005-06-21 12:43:18 -07001279
1280 fi_drop = fa->fa_info;
Robert Olsson2373ce12005-08-25 13:01:29 -07001281 new_fa->fa_tos = fa->fa_tos;
1282 new_fa->fa_info = fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001283 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001284 state = fa->fa_state;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001285 new_fa->fa_state = state & ~FA_S_ACCESSED;
Robert Olsson19baf832005-06-21 12:43:18 -07001286
Robert Olsson2373ce12005-08-25 13:01:29 -07001287 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1288 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001289
1290 fib_release_info(fi_drop);
1291 if (state & FA_S_ACCESSED)
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07001292 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
Milan Kocianb8f55832007-05-23 14:55:06 -07001293 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1294 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
Robert Olsson19baf832005-06-21 12:43:18 -07001295
Olof Johansson91b9a272005-08-09 20:24:39 -07001296 goto succeeded;
Robert Olsson19baf832005-06-21 12:43:18 -07001297 }
1298 /* Error if we find a perfect match which
1299 * uses the same scope, type, and nexthop
1300 * information.
1301 */
Julian Anastasov936f6f82008-01-28 21:18:06 -08001302 if (fa_match)
1303 goto out;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001304
Thomas Graf4e902c52006-08-17 18:14:52 -07001305 if (!(cfg->fc_nlflags & NLM_F_APPEND))
Julian Anastasov936f6f82008-01-28 21:18:06 -08001306 fa = fa_first;
Robert Olsson19baf832005-06-21 12:43:18 -07001307 }
1308 err = -ENOENT;
Thomas Graf4e902c52006-08-17 18:14:52 -07001309 if (!(cfg->fc_nlflags & NLM_F_CREATE))
Robert Olsson19baf832005-06-21 12:43:18 -07001310 goto out;
1311
1312 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001313 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson19baf832005-06-21 12:43:18 -07001314 if (new_fa == NULL)
1315 goto out;
1316
1317 new_fa->fa_info = fi;
1318 new_fa->fa_tos = tos;
Thomas Graf4e902c52006-08-17 18:14:52 -07001319 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001320 new_fa->fa_state = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001321 /*
1322 * Insert new entry to the list.
1323 */
1324
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001325 if (!fa_head) {
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001326 fa_head = fib_insert_node(t, key, plen);
1327 if (unlikely(!fa_head)) {
1328 err = -ENOMEM;
Robert Olssonf835e472005-06-28 15:00:39 -07001329 goto out_free_new_fa;
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001330 }
Robert Olssonf835e472005-06-28 15:00:39 -07001331 }
Robert Olsson19baf832005-06-21 12:43:18 -07001332
David S. Miller21d8c492011-04-14 14:49:37 -07001333 if (!plen)
1334 tb->tb_num_default++;
1335
Robert Olsson2373ce12005-08-25 13:01:29 -07001336 list_add_tail_rcu(&new_fa->fa_list,
1337 (fa ? &fa->fa_list : fa_head));
Robert Olsson19baf832005-06-21 12:43:18 -07001338
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07001339 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
Thomas Graf4e902c52006-08-17 18:14:52 -07001340 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001341 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001342succeeded:
1343 return 0;
Robert Olssonf835e472005-06-28 15:00:39 -07001344
1345out_free_new_fa:
1346 kmem_cache_free(fn_alias_kmem, new_fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001347out:
1348 fib_release_info(fi);
Olof Johansson91b9a272005-08-09 20:24:39 -07001349err:
Robert Olsson19baf832005-06-21 12:43:18 -07001350 return err;
1351}
1352
Robert Olsson772cb712005-09-19 15:31:18 -07001353/* should be called with rcu_read_lock */
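/* Return convention (summary of the code below): 0 when a matching route
 * is found and *res has been filled in, the fib_props[] error of the
 * matched route type (e.g. unreachable/prohibit), or 1 when nothing in
 * this leaf matched and the caller should keep backtracking.
 */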
David S. Miller5b470442011-01-31 16:10:03 -08001354static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
David S. Miller22bd5b92011-03-11 19:54:08 -05001355 t_key key, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001356 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001357{
Robert Olsson19baf832005-06-21 12:43:18 -07001358 struct leaf_info *li;
1359 struct hlist_head *hhead = &l->list;
1360 struct hlist_node *node;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001361
Robert Olsson2373ce12005-08-25 13:01:29 -07001362 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
David S. Miller3be06862011-03-07 15:01:10 -08001363 struct fib_alias *fa;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001364
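		/* Example (illustration only): a leaf_info for a /24 entry
		 * carries mask_plen 0xffffff00, so with key 0x0a010203
		 * (10.1.2.3) the test below only passes when l->key is
		 * 0x0a010200, i.e. when this prefix really covers the
		 * destination.
		 */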
Eric Dumazet5c745012011-07-18 03:16:33 +00001365 if (l->key != (key & li->mask_plen))
Robert Olsson19baf832005-06-21 12:43:18 -07001366 continue;
1367
David S. Miller3be06862011-03-07 15:01:10 -08001368 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1369 struct fib_info *fi = fa->fa_info;
1370 int nhsel, err;
1371
David S. Miller22bd5b92011-03-11 19:54:08 -05001372 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
David S. Miller3be06862011-03-07 15:01:10 -08001373 continue;
David S. Miller37e826c2011-03-24 18:06:47 -07001374 if (fa->fa_info->fib_scope < flp->flowi4_scope)
David S. Miller3be06862011-03-07 15:01:10 -08001375 continue;
1376 fib_alias_accessed(fa);
1377 err = fib_props[fa->fa_type].error;
1378 if (err) {
1379#ifdef CONFIG_IP_FIB_TRIE_STATS
Julian Anastasov1fbc7842011-03-25 20:33:23 -07001380 t->stats.semantic_match_passed++;
David S. Miller3be06862011-03-07 15:01:10 -08001381#endif
Julian Anastasov1fbc7842011-03-25 20:33:23 -07001382 return err;
David S. Miller3be06862011-03-07 15:01:10 -08001383 }
1384 if (fi->fib_flags & RTNH_F_DEAD)
1385 continue;
1386 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1387 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1388
1389 if (nh->nh_flags & RTNH_F_DEAD)
1390 continue;
David S. Miller22bd5b92011-03-11 19:54:08 -05001391 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
David S. Miller3be06862011-03-07 15:01:10 -08001392 continue;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001393
Robert Olsson19baf832005-06-21 12:43:18 -07001394#ifdef CONFIG_IP_FIB_TRIE_STATS
David S. Miller3be06862011-03-07 15:01:10 -08001395 t->stats.semantic_match_passed++;
Robert Olsson19baf832005-06-21 12:43:18 -07001396#endif
Eric Dumazet5c745012011-07-18 03:16:33 +00001397 res->prefixlen = li->plen;
David S. Miller3be06862011-03-07 15:01:10 -08001398 res->nh_sel = nhsel;
1399 res->type = fa->fa_type;
David S. Miller37e826c2011-03-24 18:06:47 -07001400 res->scope = fa->fa_info->fib_scope;
David S. Miller3be06862011-03-07 15:01:10 -08001401 res->fi = fi;
1402 res->table = tb;
1403 res->fa_head = &li->falh;
1404 if (!(fib_flags & FIB_LOOKUP_NOREF))
Eric Dumazet5c745012011-07-18 03:16:33 +00001405 atomic_inc(&fi->fib_clntref);
David S. Miller3be06862011-03-07 15:01:10 -08001406 return 0;
1407 }
1408 }
1409
1410#ifdef CONFIG_IP_FIB_TRIE_STATS
1411 t->stats.semantic_match_miss++;
1412#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001413 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001414
Ben Hutchings2e655572008-07-10 16:52:52 -07001415 return 1;
Robert Olsson19baf832005-06-21 12:43:18 -07001416}
1417
David S. Miller22bd5b92011-03-11 19:54:08 -05001418int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001419 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001420{
1421 struct trie *t = (struct trie *) tb->tb_data;
Ben Hutchings2e655572008-07-10 16:52:52 -07001422 int ret;
David S. Millerb299e4f2011-02-02 20:48:10 -08001423 struct rt_trie_node *n;
Robert Olsson19baf832005-06-21 12:43:18 -07001424 struct tnode *pn;
David S. Miller3b004562011-02-16 14:56:22 -08001425 unsigned int pos, bits;
David S. Miller22bd5b92011-03-11 19:54:08 -05001426 t_key key = ntohl(flp->daddr);
David S. Miller3b004562011-02-16 14:56:22 -08001427 unsigned int chopped_off;
Robert Olsson19baf832005-06-21 12:43:18 -07001428 t_key cindex = 0;
David S. Miller3b004562011-02-16 14:56:22 -08001429 unsigned int current_prefix_length = KEYLENGTH;
Olof Johansson91b9a272005-08-09 20:24:39 -07001430 struct tnode *cn;
Eric Dumazet874ffa82010-10-13 06:56:11 +00001431 t_key pref_mismatch;
Olof Johansson91b9a272005-08-09 20:24:39 -07001432
Robert Olsson2373ce12005-08-25 13:01:29 -07001433 rcu_read_lock();
Robert Olsson19baf832005-06-21 12:43:18 -07001434
Robert Olsson2373ce12005-08-25 13:01:29 -07001435 n = rcu_dereference(t->trie);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001436 if (!n)
Robert Olsson19baf832005-06-21 12:43:18 -07001437 goto failed;
1438
1439#ifdef CONFIG_IP_FIB_TRIE_STATS
1440 t->stats.gets++;
1441#endif
1442
1443 /* Just a leaf? */
1444 if (IS_LEAF(n)) {
David S. Miller5b470442011-01-31 16:10:03 -08001445 ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001446 goto found;
Robert Olsson19baf832005-06-21 12:43:18 -07001447 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001448
Robert Olsson19baf832005-06-21 12:43:18 -07001449 pn = (struct tnode *) n;
1450 chopped_off = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001451
Olof Johansson91b9a272005-08-09 20:24:39 -07001452 while (pn) {
Robert Olsson19baf832005-06-21 12:43:18 -07001453 pos = pn->pos;
1454 bits = pn->bits;
1455
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001456 if (!chopped_off)
Stephen Hemmingerab66b4a2007-08-10 15:22:58 -07001457 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1458 pos, bits);
Robert Olsson19baf832005-06-21 12:43:18 -07001459
Jarek Poplawskib902e572009-07-14 11:20:32 +00001460 n = tnode_get_child_rcu(pn, cindex);
Robert Olsson19baf832005-06-21 12:43:18 -07001461
1462 if (n == NULL) {
1463#ifdef CONFIG_IP_FIB_TRIE_STATS
1464 t->stats.null_node_hit++;
1465#endif
1466 goto backtrace;
1467 }
1468
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001469 if (IS_LEAF(n)) {
David S. Miller5b470442011-01-31 16:10:03 -08001470 ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
Ben Hutchings2e655572008-07-10 16:52:52 -07001471 if (ret > 0)
Olof Johansson91b9a272005-08-09 20:24:39 -07001472 goto backtrace;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001473 goto found;
Olof Johansson91b9a272005-08-09 20:24:39 -07001474 }
1475
Olof Johansson91b9a272005-08-09 20:24:39 -07001476 cn = (struct tnode *)n;
1477
1478 /*
1479 * It's a tnode, and we can do some extra checks here if we
1480 * like, to avoid descending into a dead-end branch.
1481 * This tnode is in the parent's child array at index
1482 * key[p_pos..p_pos+p_bits] but potentially with some bits
1483 * chopped off, so in reality the index may be just a
1484 * subprefix, padded with zero at the end.
1485 * We can also take a look at any skipped bits in this
1486 * tnode - everything up to p_pos is supposed to be ok,
1487		 * and the non-chopped bits of the index (see previous
1488 * paragraph) are also guaranteed ok, but the rest is
1489 * considered unknown.
1490 *
1491 * The skipped bits are key[pos+bits..cn->pos].
1492 */
1493
1494 /* If current_prefix_length < pos+bits, we are already doing
1495 * actual prefix matching, which means everything from
1496 * pos+(bits-chopped_off) onward must be zero along some
1497 * branch of this subtree - otherwise there is *no* valid
1498 * prefix present. Here we can only check the skipped
1499 * bits. Remember, since we have already indexed into the
1500		 * parent's child array, we know that the bits we chopped off
1501 * *are* zero.
1502 */
1503
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001504		/* NOTA BENE: Checking only skipped bits
1505		 * for the new node here */
Olof Johansson91b9a272005-08-09 20:24:39 -07001506
1507 if (current_prefix_length < pos+bits) {
1508 if (tkey_extract_bits(cn->key, current_prefix_length,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001509 cn->pos - current_prefix_length)
1510 || !(cn->child[0]))
Olof Johansson91b9a272005-08-09 20:24:39 -07001511 goto backtrace;
1512 }
1513
1514 /*
1515 * If chopped_off=0, the index is fully validated and we
1516 * only need to look at the skipped bits for this, the new,
1517 * tnode. What we actually want to do is to find out if
1518 * these skipped bits match our key perfectly, or if we will
1519 * have to count on finding a matching prefix further down,
1520 * because if we do, we would like to have some way of
1521 * verifying the existence of such a prefix at this point.
1522 */
1523
1524 /* The only thing we can do at this point is to verify that
1525 * any such matching prefix can indeed be a prefix to our
1526 * key, and if the bits in the node we are inspecting that
1527 * do not match our key are not ZERO, this cannot be true.
1528 * Thus, find out where there is a mismatch (before cn->pos)
1529 * and verify that all the mismatching bits are zero in the
1530 * new tnode's key.
1531 */
1532
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001533 /*
1534 * Note: We aren't very concerned about the piece of
1535		 * the key that precedes pn->pos+pn->bits, since those bits
1536		 * have already been checked. The bits after cn->pos
1537 * aren't checked since these are by definition
1538 * "unknown" at this point. Thus, what we want to see
1539 * is if we are about to enter the "prefix matching"
1540 * state, and in that case verify that the skipped
1541 * bits that will prevail throughout this subtree are
1542 * zero, as they have to be if we are to find a
1543 * matching prefix.
Olof Johansson91b9a272005-08-09 20:24:39 -07001544 */
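		/* Worked example (illustration only): key = 0x0a000001,
		 * cn->key = 0x0a800000, cn->pos = 12.  The masked XOR below
		 * gives pref_mismatch = 0x00800000, fls() returns 24, so
		 * mp = 8: the first disagreement is at bit 8.  Bits 8..11 of
		 * cn->key are not all zero, hence no prefix in this subtree
		 * can cover the key and we backtrack.
		 */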
1545
Eric Dumazet874ffa82010-10-13 06:56:11 +00001546 pref_mismatch = mask_pfx(cn->key ^ key, cn->pos);
Olof Johansson91b9a272005-08-09 20:24:39 -07001547
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001548 /*
1549 * In short: If skipped bits in this node do not match
1550 * the search key, enter the "prefix matching"
1551		 * state directly.
Olof Johansson91b9a272005-08-09 20:24:39 -07001552 */
1553 if (pref_mismatch) {
Eric Dumazet874ffa82010-10-13 06:56:11 +00001554 int mp = KEYLENGTH - fls(pref_mismatch);
Olof Johansson91b9a272005-08-09 20:24:39 -07001555
Eric Dumazet874ffa82010-10-13 06:56:11 +00001556 if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
Olof Johansson91b9a272005-08-09 20:24:39 -07001557 goto backtrace;
1558
1559 if (current_prefix_length >= cn->pos)
1560 current_prefix_length = mp;
1561 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001562
Olof Johansson91b9a272005-08-09 20:24:39 -07001563 pn = (struct tnode *)n; /* Descend */
1564 chopped_off = 0;
1565 continue;
1566
Robert Olsson19baf832005-06-21 12:43:18 -07001567backtrace:
1568 chopped_off++;
1569
1570		/* Zero bits don't change the child key (cindex), so skip them */
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001571 while ((chopped_off <= pn->bits)
1572 && !(cindex & (1<<(chopped_off-1))))
Robert Olsson19baf832005-06-21 12:43:18 -07001573 chopped_off++;
Robert Olsson19baf832005-06-21 12:43:18 -07001574
1575 /* Decrease current_... with bits chopped off */
1576 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001577 current_prefix_length = pn->pos + pn->bits
1578 - chopped_off;
Olof Johansson91b9a272005-08-09 20:24:39 -07001579
Robert Olsson19baf832005-06-21 12:43:18 -07001580 /*
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001581		 * Either we do the actual chop off accordingly, or, if we have
Robert Olsson19baf832005-06-21 12:43:18 -07001582		 * chopped off all bits in this tnode, walk up to our parent.
1583 */
1584
Olof Johansson91b9a272005-08-09 20:24:39 -07001585 if (chopped_off <= pn->bits) {
Robert Olsson19baf832005-06-21 12:43:18 -07001586 cindex &= ~(1 << (chopped_off-1));
Olof Johansson91b9a272005-08-09 20:24:39 -07001587 } else {
David S. Millerb299e4f2011-02-02 20:48:10 -08001588 struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
Stephen Hemminger06801912007-08-10 15:22:13 -07001589 if (!parent)
Robert Olsson19baf832005-06-21 12:43:18 -07001590 goto failed;
Olof Johansson91b9a272005-08-09 20:24:39 -07001591
Robert Olsson19baf832005-06-21 12:43:18 -07001592 /* Get Child's index */
Stephen Hemminger06801912007-08-10 15:22:13 -07001593 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1594 pn = parent;
Robert Olsson19baf832005-06-21 12:43:18 -07001595 chopped_off = 0;
1596
1597#ifdef CONFIG_IP_FIB_TRIE_STATS
1598 t->stats.backtrack++;
1599#endif
1600 goto backtrace;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001601 }
Robert Olsson19baf832005-06-21 12:43:18 -07001602 }
1603failed:
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001604 ret = 1;
Robert Olsson19baf832005-06-21 12:43:18 -07001605found:
Robert Olsson2373ce12005-08-25 13:01:29 -07001606 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001607 return ret;
1608}
1609
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001610/*
1611 * Remove the leaf and rebalance the trie from its parent.
1612 */
1613static void trie_leaf_remove(struct trie *t, struct leaf *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001614{
David S. Millerb299e4f2011-02-02 20:48:10 -08001615 struct tnode *tp = node_parent((struct rt_trie_node *) l);
Robert Olsson19baf832005-06-21 12:43:18 -07001616
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001617 pr_debug("entering trie_leaf_remove(%p)\n", l);
Robert Olsson19baf832005-06-21 12:43:18 -07001618
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001619 if (tp) {
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001620 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
Robert Olsson19baf832005-06-21 12:43:18 -07001621 put_child(t, (struct tnode *)tp, cindex, NULL);
Jarek Poplawski7b855762009-06-18 00:28:51 -07001622 trie_rebalance(t, tp);
Olof Johansson91b9a272005-08-09 20:24:39 -07001623 } else
Robert Olsson2373ce12005-08-25 13:01:29 -07001624 rcu_assign_pointer(t->trie, NULL);
Robert Olsson19baf832005-06-21 12:43:18 -07001625
Stephen Hemminger387a5482008-04-10 03:47:34 -07001626 free_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001627}
1628
Robert Olssond562f1f2007-03-26 14:22:22 -07001629/*
1630 * Caller must hold RTNL.
1631 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001632int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001633{
1634 struct trie *t = (struct trie *) tb->tb_data;
1635 u32 key, mask;
Thomas Graf4e902c52006-08-17 18:14:52 -07001636 int plen = cfg->fc_dst_len;
1637 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001638 struct fib_alias *fa, *fa_to_delete;
1639 struct list_head *fa_head;
1640 struct leaf *l;
Olof Johansson91b9a272005-08-09 20:24:39 -07001641 struct leaf_info *li;
1642
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001643 if (plen > 32)
Robert Olsson19baf832005-06-21 12:43:18 -07001644 return -EINVAL;
1645
Thomas Graf4e902c52006-08-17 18:14:52 -07001646 key = ntohl(cfg->fc_dst);
Olof Johansson91b9a272005-08-09 20:24:39 -07001647 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001648
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001649 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001650 return -EINVAL;
1651
1652 key = key & mask;
1653 l = fib_find_node(t, key);
1654
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001655 if (!l)
Robert Olsson19baf832005-06-21 12:43:18 -07001656 return -ESRCH;
1657
1658 fa_head = get_fa_head(l, plen);
1659 fa = fib_find_alias(fa_head, tos, 0);
1660
1661 if (!fa)
1662 return -ESRCH;
1663
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001664 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
Robert Olsson19baf832005-06-21 12:43:18 -07001665
1666 fa_to_delete = NULL;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001667 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1668 list_for_each_entry_continue(fa, fa_head, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001669 struct fib_info *fi = fa->fa_info;
1670
1671 if (fa->fa_tos != tos)
1672 break;
1673
Thomas Graf4e902c52006-08-17 18:14:52 -07001674 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1675 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
David S. Miller37e826c2011-03-24 18:06:47 -07001676 fa->fa_info->fib_scope == cfg->fc_scope) &&
Julian Anastasov74cb3c12011-03-19 12:13:46 +00001677 (!cfg->fc_prefsrc ||
1678 fi->fib_prefsrc == cfg->fc_prefsrc) &&
Thomas Graf4e902c52006-08-17 18:14:52 -07001679 (!cfg->fc_protocol ||
1680 fi->fib_protocol == cfg->fc_protocol) &&
1681 fib_nh_match(cfg, fi) == 0) {
Robert Olsson19baf832005-06-21 12:43:18 -07001682 fa_to_delete = fa;
1683 break;
1684 }
1685 }
1686
Olof Johansson91b9a272005-08-09 20:24:39 -07001687 if (!fa_to_delete)
1688 return -ESRCH;
Robert Olsson19baf832005-06-21 12:43:18 -07001689
Olof Johansson91b9a272005-08-09 20:24:39 -07001690 fa = fa_to_delete;
Thomas Graf4e902c52006-08-17 18:14:52 -07001691 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001692 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001693
Olof Johansson91b9a272005-08-09 20:24:39 -07001694 l = fib_find_node(t, key);
Robert Olsson772cb712005-09-19 15:31:18 -07001695 li = find_leaf_info(l, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001696
Robert Olsson2373ce12005-08-25 13:01:29 -07001697 list_del_rcu(&fa->fa_list);
Robert Olsson19baf832005-06-21 12:43:18 -07001698
David S. Miller21d8c492011-04-14 14:49:37 -07001699 if (!plen)
1700 tb->tb_num_default--;
1701
Olof Johansson91b9a272005-08-09 20:24:39 -07001702 if (list_empty(fa_head)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001703 hlist_del_rcu(&li->hlist);
Olof Johansson91b9a272005-08-09 20:24:39 -07001704 free_leaf_info(li);
Robert Olsson2373ce12005-08-25 13:01:29 -07001705 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001706
1707 if (hlist_empty(&l->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001708 trie_leaf_remove(t, l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001709
1710 if (fa->fa_state & FA_S_ACCESSED)
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07001711 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
Olof Johansson91b9a272005-08-09 20:24:39 -07001712
Robert Olsson2373ce12005-08-25 13:01:29 -07001713 fib_release_info(fa->fa_info);
1714 alias_free_mem_rcu(fa);
Olof Johansson91b9a272005-08-09 20:24:39 -07001715 return 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001716}
1717
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001718static int trie_flush_list(struct list_head *head)
Robert Olsson19baf832005-06-21 12:43:18 -07001719{
1720 struct fib_alias *fa, *fa_node;
1721 int found = 0;
1722
1723 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1724 struct fib_info *fi = fa->fa_info;
Robert Olsson19baf832005-06-21 12:43:18 -07001725
Robert Olsson2373ce12005-08-25 13:01:29 -07001726 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1727 list_del_rcu(&fa->fa_list);
1728 fib_release_info(fa->fa_info);
1729 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001730 found++;
1731 }
1732 }
1733 return found;
1734}
1735
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001736static int trie_flush_leaf(struct leaf *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001737{
1738 int found = 0;
1739 struct hlist_head *lih = &l->list;
1740 struct hlist_node *node, *tmp;
1741 struct leaf_info *li = NULL;
1742
1743 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001744 found += trie_flush_list(&li->falh);
Robert Olsson19baf832005-06-21 12:43:18 -07001745
1746 if (list_empty(&li->falh)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001747 hlist_del_rcu(&li->hlist);
Robert Olsson19baf832005-06-21 12:43:18 -07001748 free_leaf_info(li);
1749 }
1750 }
1751 return found;
1752}
1753
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001754/*
1755 * Scan for the next leaf to the right, starting at node p->child[idx].
1756 * Since we have back pointers, no recursion is necessary.
1757 */
David S. Millerb299e4f2011-02-02 20:48:10 -08001758static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
Robert Olsson19baf832005-06-21 12:43:18 -07001759{
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001760 do {
1761 t_key idx;
Robert Olsson19baf832005-06-21 12:43:18 -07001762
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001763 if (c)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001764 idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001765 else
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001766 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001767
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001768 while (idx < 1u << p->bits) {
1769 c = tnode_get_child_rcu(p, idx++);
Robert Olsson2373ce12005-08-25 13:01:29 -07001770 if (!c)
Olof Johansson91b9a272005-08-09 20:24:39 -07001771 continue;
Robert Olsson19baf832005-06-21 12:43:18 -07001772
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001773 if (IS_LEAF(c)) {
Eric Dumazet0a5c0472011-03-31 01:51:35 -07001774 prefetch(rcu_dereference_rtnl(p->child[idx]));
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001775 return (struct leaf *) c;
Robert Olsson19baf832005-06-21 12:43:18 -07001776 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001777
1778			/* Descend and restart scanning in the new node */
1779 p = (struct tnode *) c;
1780 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001781 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001782
1783 /* Node empty, walk back up to parent */
David S. Millerb299e4f2011-02-02 20:48:10 -08001784 c = (struct rt_trie_node *) p;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001785 } while ((p = node_parent_rcu(c)) != NULL);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001786
1787 return NULL; /* Root of trie */
1788}
1789
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001790static struct leaf *trie_firstleaf(struct trie *t)
1791{
Eric Dumazeta034ee32010-09-09 23:32:28 +00001792 struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001793
1794 if (!n)
1795 return NULL;
1796
1797 if (IS_LEAF(n)) /* trie is just a leaf */
1798 return (struct leaf *) n;
1799
1800 return leaf_walk_rcu(n, NULL);
1801}
1802
1803static struct leaf *trie_nextleaf(struct leaf *l)
1804{
David S. Millerb299e4f2011-02-02 20:48:10 -08001805 struct rt_trie_node *c = (struct rt_trie_node *) l;
Jarek Poplawskib902e572009-07-14 11:20:32 +00001806 struct tnode *p = node_parent_rcu(c);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001807
1808 if (!p)
1809 return NULL; /* trie with just one leaf */
1810
1811 return leaf_walk_rcu(p, c);
Robert Olsson19baf832005-06-21 12:43:18 -07001812}
1813
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001814static struct leaf *trie_leafindex(struct trie *t, int index)
1815{
1816 struct leaf *l = trie_firstleaf(t);
1817
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001818 while (l && index-- > 0)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001819 l = trie_nextleaf(l);
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001820
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001821 return l;
1822}
1823
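/*
 * Usage sketch (illustration only; it mirrors the loop in
 * fib_table_flush() below, with do_something() standing for any
 * hypothetical per-leaf work done under RTNL or rcu_read_lock):
 *
 *	struct leaf *l;
 *
 *	for (l = trie_firstleaf(t); l; l = trie_nextleaf(l))
 *		do_something(l);
 */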
1824
Robert Olssond562f1f2007-03-26 14:22:22 -07001825/*
1826 * Caller must hold RTNL.
1827 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001828int fib_table_flush(struct fib_table *tb)
Robert Olsson19baf832005-06-21 12:43:18 -07001829{
1830 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001831 struct leaf *l, *ll = NULL;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001832 int found = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001833
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001834 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001835 found += trie_flush_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001836
1837 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001838 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001839 ll = l;
1840 }
1841
1842 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001843 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001844
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001845 pr_debug("trie_flush found=%d\n", found);
Robert Olsson19baf832005-06-21 12:43:18 -07001846 return found;
1847}
1848
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001849void fib_free_table(struct fib_table *tb)
1850{
1851 kfree(tb);
1852}
1853
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001854static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1855 struct fib_table *tb,
Robert Olsson19baf832005-06-21 12:43:18 -07001856 struct sk_buff *skb, struct netlink_callback *cb)
1857{
1858 int i, s_i;
1859 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07001860 __be32 xkey = htonl(key);
Robert Olsson19baf832005-06-21 12:43:18 -07001861
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001862 s_i = cb->args[5];
Robert Olsson19baf832005-06-21 12:43:18 -07001863 i = 0;
1864
Robert Olsson2373ce12005-08-25 13:01:29 -07001865	/* rcu_read_lock is held by the caller */
1866
1867 list_for_each_entry_rcu(fa, fah, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001868 if (i < s_i) {
1869 i++;
1870 continue;
1871 }
Robert Olsson19baf832005-06-21 12:43:18 -07001872
1873 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1874 cb->nlh->nlmsg_seq,
1875 RTM_NEWROUTE,
1876 tb->tb_id,
1877 fa->fa_type,
Thomas Grafbe403ea2006-08-17 18:15:17 -07001878 xkey,
Robert Olsson19baf832005-06-21 12:43:18 -07001879 plen,
1880 fa->fa_tos,
Stephen Hemminger64347f72008-01-22 21:55:01 -08001881 fa->fa_info, NLM_F_MULTI) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001882 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001883 return -1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001884 }
Robert Olsson19baf832005-06-21 12:43:18 -07001885 i++;
1886 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001887 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001888 return skb->len;
1889}
1890
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001891static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1892 struct sk_buff *skb, struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001893{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001894 struct leaf_info *li;
1895 struct hlist_node *node;
1896 int i, s_i;
Robert Olsson19baf832005-06-21 12:43:18 -07001897
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001898 s_i = cb->args[4];
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001899 i = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001900
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001901	/* rcu_read_lock is held by the caller */
1902 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
1903 if (i < s_i) {
1904 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001905 continue;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001906 }
Robert Olsson19baf832005-06-21 12:43:18 -07001907
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001908 if (i > s_i)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001909 cb->args[5] = 0;
Olof Johansson91b9a272005-08-09 20:24:39 -07001910
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001911 if (list_empty(&li->falh))
Robert Olsson19baf832005-06-21 12:43:18 -07001912 continue;
1913
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001914 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001915 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001916 return -1;
1917 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001918 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001919 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001920
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001921 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001922 return skb->len;
1923}
1924
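/* Dump resume state kept in the netlink callback (as used below and in
 * fn_trie_dump_leaf()/fn_trie_dump_fa() above):
 *	cb->args[2]	key of the last dumped leaf
 *	cb->args[3]	number of leaves dumped so far
 *	cb->args[4]	leaf_info index within the current leaf
 *	cb->args[5]	alias index within the current leaf_info
 */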
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001925int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1926 struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001927{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001928 struct leaf *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001929 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001930 t_key key = cb->args[2];
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001931 int count = cb->args[3];
Robert Olsson19baf832005-06-21 12:43:18 -07001932
Robert Olsson2373ce12005-08-25 13:01:29 -07001933 rcu_read_lock();
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001934 /* Dump starting at last key.
1935	 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1936 */
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001937 if (count == 0)
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001938 l = trie_firstleaf(t);
1939 else {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001940 /* Normally, continue from last key, but if that is missing
1941	 * fall back to using a slow rescan
1942 */
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001943 l = fib_find_node(t, key);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001944 if (!l)
1945 l = trie_leafindex(t, count);
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001946 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001947
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001948 while (l) {
1949 cb->args[2] = l->key;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001950 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001951 cb->args[3] = count;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001952 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001953 return -1;
Robert Olsson19baf832005-06-21 12:43:18 -07001954 }
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001955
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001956 ++count;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001957 l = trie_nextleaf(l);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001958 memset(&cb->args[4], 0,
1959 sizeof(cb->args) - 4*sizeof(cb->args[0]));
Robert Olsson19baf832005-06-21 12:43:18 -07001960 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001961 cb->args[3] = count;
Robert Olsson2373ce12005-08-25 13:01:29 -07001962 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001963
Robert Olsson19baf832005-06-21 12:43:18 -07001964 return skb->len;
Robert Olsson19baf832005-06-21 12:43:18 -07001965}
1966
David S. Miller5348ba82011-02-01 15:30:56 -08001967void __init fib_trie_init(void)
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001968{
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001969 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1970 sizeof(struct fib_alias),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001971 0, SLAB_PANIC, NULL);
1972
1973 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1974 max(sizeof(struct leaf),
1975 sizeof(struct leaf_info)),
1976 0, SLAB_PANIC, NULL);
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001977}
Robert Olsson19baf832005-06-21 12:43:18 -07001978
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001979
David S. Miller5348ba82011-02-01 15:30:56 -08001980struct fib_table *fib_trie_table(u32 id)
Robert Olsson19baf832005-06-21 12:43:18 -07001981{
1982 struct fib_table *tb;
1983 struct trie *t;
1984
Robert Olsson19baf832005-06-21 12:43:18 -07001985 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1986 GFP_KERNEL);
1987 if (tb == NULL)
1988 return NULL;
1989
1990 tb->tb_id = id;
Denis V. Lunev971b8932007-12-08 00:32:23 -08001991 tb->tb_default = -1;
David S. Miller21d8c492011-04-14 14:49:37 -07001992 tb->tb_num_default = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001993
1994 t = (struct trie *) tb->tb_data;
Stephen Hemmingerc28a1cf2008-01-12 20:49:13 -08001995 memset(t, 0, sizeof(*t));
Robert Olsson19baf832005-06-21 12:43:18 -07001996
Robert Olsson19baf832005-06-21 12:43:18 -07001997 return tb;
1998}
1999
Robert Olsson19baf832005-06-21 12:43:18 -07002000#ifdef CONFIG_PROC_FS
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002001/* Depth first Trie walk iterator */
2002struct fib_trie_iter {
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002003 struct seq_net_private p;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002004 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002005 struct tnode *tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002006 unsigned int index;
2007 unsigned int depth;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002008};
Robert Olsson19baf832005-06-21 12:43:18 -07002009
David S. Millerb299e4f2011-02-02 20:48:10 -08002010static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
Robert Olsson19baf832005-06-21 12:43:18 -07002011{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002012 struct tnode *tn = iter->tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002013 unsigned int cindex = iter->index;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002014 struct tnode *p;
2015
Eric W. Biederman6640e692007-01-24 14:42:04 -08002016 /* A single entry routing table */
2017 if (!tn)
2018 return NULL;
2019
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002020 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2021 iter->tnode, iter->index, iter->depth);
2022rescan:
2023 while (cindex < (1<<tn->bits)) {
David S. Millerb299e4f2011-02-02 20:48:10 -08002024 struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002025
2026 if (n) {
2027 if (IS_LEAF(n)) {
2028 iter->tnode = tn;
2029 iter->index = cindex + 1;
2030 } else {
2031 /* push down one level */
2032 iter->tnode = (struct tnode *) n;
2033 iter->index = 0;
2034 ++iter->depth;
2035 }
2036 return n;
2037 }
2038
2039 ++cindex;
2040 }
2041
2042 /* Current node exhausted, pop back up */
David S. Millerb299e4f2011-02-02 20:48:10 -08002043 p = node_parent_rcu((struct rt_trie_node *)tn);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002044 if (p) {
2045 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2046 tn = p;
2047 --iter->depth;
2048 goto rescan;
2049 }
2050
2051 /* got root? */
Robert Olsson19baf832005-06-21 12:43:18 -07002052 return NULL;
2053}
2054
David S. Millerb299e4f2011-02-02 20:48:10 -08002055static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002056 struct trie *t)
Robert Olsson19baf832005-06-21 12:43:18 -07002057{
David S. Millerb299e4f2011-02-02 20:48:10 -08002058 struct rt_trie_node *n;
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08002059
Stephen Hemminger132adf52007-03-08 20:44:43 -08002060 if (!t)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08002061 return NULL;
2062
2063 n = rcu_dereference(t->trie);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002064 if (!n)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08002065 return NULL;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002066
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002067 if (IS_TNODE(n)) {
2068 iter->tnode = (struct tnode *) n;
2069 iter->index = 0;
2070 iter->depth = 1;
2071 } else {
2072 iter->tnode = NULL;
2073 iter->index = 0;
2074 iter->depth = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002075 }
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002076
2077 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002078}
2079
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002080static void trie_collect_stats(struct trie *t, struct trie_stat *s)
Robert Olsson19baf832005-06-21 12:43:18 -07002081{
David S. Millerb299e4f2011-02-02 20:48:10 -08002082 struct rt_trie_node *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002083 struct fib_trie_iter iter;
Robert Olsson19baf832005-06-21 12:43:18 -07002084
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002085 memset(s, 0, sizeof(*s));
Robert Olsson19baf832005-06-21 12:43:18 -07002086
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002087 rcu_read_lock();
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002088 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002089 if (IS_LEAF(n)) {
Stephen Hemminger93672292008-01-22 21:54:05 -08002090 struct leaf *l = (struct leaf *)n;
2091 struct leaf_info *li;
2092 struct hlist_node *tmp;
2093
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002094 s->leaves++;
2095 s->totdepth += iter.depth;
2096 if (iter.depth > s->maxdepth)
2097 s->maxdepth = iter.depth;
Stephen Hemminger93672292008-01-22 21:54:05 -08002098
2099 hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
2100 ++s->prefixes;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002101 } else {
2102 const struct tnode *tn = (const struct tnode *) n;
2103 int i;
Robert Olsson19baf832005-06-21 12:43:18 -07002104
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002105 s->tnodes++;
Stephen Hemminger132adf52007-03-08 20:44:43 -08002106 if (tn->bits < MAX_STAT_DEPTH)
Robert Olsson06ef9212006-03-20 21:35:01 -08002107 s->nodesizes[tn->bits]++;
2108
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002109 for (i = 0; i < (1<<tn->bits); i++)
2110 if (!tn->child[i])
2111 s->nullpointers++;
2112 }
2113 }
2114 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002115}
2116
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07002117/*
Robert Olsson19baf832005-06-21 12:43:18 -07002118 * This outputs /proc/net/fib_triestats
Robert Olsson19baf832005-06-21 12:43:18 -07002119 */
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002120static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
Robert Olsson19baf832005-06-21 12:43:18 -07002121{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002122 unsigned int i, max, pointers, bytes, avdepth;
Robert Olsson19baf832005-06-21 12:43:18 -07002123
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002124 if (stat->leaves)
2125 avdepth = stat->totdepth*100 / stat->leaves;
2126 else
2127 avdepth = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07002128
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002129 seq_printf(seq, "\tAver depth: %u.%02d\n",
2130 avdepth / 100, avdepth % 100);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002131 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
Robert Olsson19baf832005-06-21 12:43:18 -07002132
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002133 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002134 bytes = sizeof(struct leaf) * stat->leaves;
Stephen Hemminger93672292008-01-22 21:54:05 -08002135
2136 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
2137 bytes += sizeof(struct leaf_info) * stat->prefixes;
2138
Stephen Hemminger187b5182008-01-12 20:55:55 -08002139 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002140 bytes += sizeof(struct tnode) * stat->tnodes;
Robert Olsson19baf832005-06-21 12:43:18 -07002141
Robert Olsson06ef9212006-03-20 21:35:01 -08002142 max = MAX_STAT_DEPTH;
2143 while (max > 0 && stat->nodesizes[max-1] == 0)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002144 max--;
Robert Olsson19baf832005-06-21 12:43:18 -07002145
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002146 pointers = 0;
2147 for (i = 1; i <= max; i++)
2148 if (stat->nodesizes[i] != 0) {
Stephen Hemminger187b5182008-01-12 20:55:55 -08002149 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002150 pointers += (1<<i) * stat->nodesizes[i];
2151 }
2152 seq_putc(seq, '\n');
Stephen Hemminger187b5182008-01-12 20:55:55 -08002153 seq_printf(seq, "\tPointers: %u\n", pointers);
Robert Olsson19baf832005-06-21 12:43:18 -07002154
David S. Millerb299e4f2011-02-02 20:48:10 -08002155 bytes += sizeof(struct rt_trie_node *) * pointers;
Stephen Hemminger187b5182008-01-12 20:55:55 -08002156 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2157 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08002158}
Robert Olsson19baf832005-06-21 12:43:18 -07002159
2160#ifdef CONFIG_IP_FIB_TRIE_STATS
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08002161static void trie_show_usage(struct seq_file *seq,
2162 const struct trie_use_stats *stats)
2163{
2164 seq_printf(seq, "\nCounters:\n---------\n");
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002165 seq_printf(seq, "gets = %u\n", stats->gets);
2166 seq_printf(seq, "backtracks = %u\n", stats->backtrack);
2167 seq_printf(seq, "semantic match passed = %u\n",
2168 stats->semantic_match_passed);
2169 seq_printf(seq, "semantic match miss = %u\n",
2170 stats->semantic_match_miss);
2171 seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
2172 seq_printf(seq, "skipped node resize = %u\n\n",
2173 stats->resize_node_skipped);
Robert Olsson19baf832005-06-21 12:43:18 -07002174}
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08002175#endif /* CONFIG_IP_FIB_TRIE_STATS */
2176
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002177static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08002178{
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002179 if (tb->tb_id == RT_TABLE_LOCAL)
2180 seq_puts(seq, "Local:\n");
2181 else if (tb->tb_id == RT_TABLE_MAIN)
2182 seq_puts(seq, "Main:\n");
2183 else
2184 seq_printf(seq, "Id %d:\n", tb->tb_id);
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08002185}
Robert Olsson19baf832005-06-21 12:43:18 -07002186
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002187
Robert Olsson19baf832005-06-21 12:43:18 -07002188static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2189{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002190 struct net *net = (struct net *)seq->private;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002191 unsigned int h;
Eric W. Biederman877a9bf2007-12-07 00:47:47 -08002192
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08002193 seq_printf(seq,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002194 "Basic info: size of leaf:"
2195 " %Zd bytes, size of tnode: %Zd bytes.\n",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002196 sizeof(struct leaf), sizeof(struct tnode));
Olof Johansson91b9a272005-08-09 20:24:39 -07002197
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002198 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2199 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2200 struct hlist_node *node;
2201 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002202
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002203 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
2204 struct trie *t = (struct trie *) tb->tb_data;
2205 struct trie_stat stat;
2206
2207 if (!t)
2208 continue;
2209
2210 fib_table_print(seq, tb);
2211
2212 trie_collect_stats(t, &stat);
2213 trie_show_stats(seq, &stat);
2214#ifdef CONFIG_IP_FIB_TRIE_STATS
2215 trie_show_usage(seq, &t->stats);
2216#endif
2217 }
2218 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002219
Robert Olsson19baf832005-06-21 12:43:18 -07002220 return 0;
2221}
2222
Robert Olsson19baf832005-06-21 12:43:18 -07002223static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2224{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07002225 return single_open_net(inode, file, fib_triestat_seq_show);
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002226}
2227
Arjan van de Ven9a321442007-02-12 00:55:35 -08002228static const struct file_operations fib_triestat_fops = {
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07002229 .owner = THIS_MODULE,
2230 .open = fib_triestat_seq_open,
2231 .read = seq_read,
2232 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07002233 .release = single_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002234};
2235
David S. Millerb299e4f2011-02-02 20:48:10 -08002236static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
Robert Olsson19baf832005-06-21 12:43:18 -07002237{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002238 struct fib_trie_iter *iter = seq->private;
2239 struct net *net = seq_file_net(seq);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002240 loff_t idx = 0;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002241 unsigned int h;
Robert Olsson19baf832005-06-21 12:43:18 -07002242
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002243 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2244 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2245 struct hlist_node *node;
2246 struct fib_table *tb;
2247
2248 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
David S. Millerb299e4f2011-02-02 20:48:10 -08002249 struct rt_trie_node *n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002250
2251 for (n = fib_trie_get_first(iter,
2252 (struct trie *) tb->tb_data);
2253 n; n = fib_trie_get_next(iter))
2254 if (pos == idx++) {
2255 iter->tb = tb;
2256 return n;
2257 }
2258 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002259 }
Robert Olsson19baf832005-06-21 12:43:18 -07002260
Robert Olsson19baf832005-06-21 12:43:18 -07002261 return NULL;
2262}
2263
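/*
 * fib_trie_get_idx() above flattens the whole forest into one linear
 * sequence for seq_file: position 0 is the first node of the first
 * table on hash chain 0, and the walk continues node by node (tnodes
 * and leaves alike) through every trie via fib_trie_get_first()/
 * fib_trie_get_next(), remembering the owning table in iter->tb.
 * Restarting at an arbitrary *pos therefore costs O(pos) node visits;
 * fib_trie_seq_start() below simply takes the RCU read lock and maps
 * *pos onto a node this way.
 */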
2264static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002265 __acquires(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002266{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002267 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002268 return fib_trie_get_idx(seq, *pos);
Robert Olsson19baf832005-06-21 12:43:18 -07002269}
2270
2271static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2272{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002273 struct fib_trie_iter *iter = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002274 struct net *net = seq_file_net(seq);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002275 struct fib_table *tb = iter->tb;
2276 struct hlist_node *tb_node;
2277 unsigned int h;
David S. Millerb299e4f2011-02-02 20:48:10 -08002278 struct rt_trie_node *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002279
Robert Olsson19baf832005-06-21 12:43:18 -07002280 ++*pos;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002281 /* next node in same table */
2282 n = fib_trie_get_next(iter);
2283 if (n)
2284 return n;
Olof Johansson91b9a272005-08-09 20:24:39 -07002285
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002286 /* walk rest of this hash chain */
2287 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
Eric Dumazet0a5c0472011-03-31 01:51:35 -07002288 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002289 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2290 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2291 if (n)
2292 goto found;
2293 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002294
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002295 /* new hash chain */
2296 while (++h < FIB_TABLE_HASHSZ) {
2297 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2298 hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
2299 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2300 if (n)
2301 goto found;
2302 }
2303 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002304 return NULL;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002305
2306found:
2307 iter->tb = tb;
2308 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002309}
2310
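/*
 * Continuation logic for fib_trie_seq_next() above, in three steps:
 * first the next node inside the current trie; when that trie is
 * exhausted, the remaining tables on the same fib_table_hash chain
 * (via hlist_next_rcu()); and finally the first table of each later
 * hash bucket.  iter->tb is updated so fib_trie_seq_show() can print a
 * fresh table heading whenever a new trie's root is reached.  The RCU
 * read lock taken in ->start stays held across all of this until
 * fib_trie_seq_stop() below releases it.
 */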
2311static void fib_trie_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002312 __releases(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002313{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002314 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002315}
2316
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002317static void seq_indent(struct seq_file *seq, int n)
2318{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002319 while (n-- > 0)
2320 seq_puts(seq, " ");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002321}
Robert Olsson19baf832005-06-21 12:43:18 -07002322
Eric Dumazet28d36e32008-01-14 23:09:56 -08002323static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002324{
Stephen Hemminger132adf52007-03-08 20:44:43 -08002325 switch (s) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002326 case RT_SCOPE_UNIVERSE: return "universe";
2327 case RT_SCOPE_SITE: return "site";
2328 case RT_SCOPE_LINK: return "link";
2329 case RT_SCOPE_HOST: return "host";
2330 case RT_SCOPE_NOWHERE: return "nowhere";
2331 default:
Eric Dumazet28d36e32008-01-14 23:09:56 -08002332 snprintf(buf, len, "scope=%d", s);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002333 return buf;
2334 }
2335}
2336
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -07002337static const char *const rtn_type_names[__RTN_MAX] = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002338 [RTN_UNSPEC] = "UNSPEC",
2339 [RTN_UNICAST] = "UNICAST",
2340 [RTN_LOCAL] = "LOCAL",
2341 [RTN_BROADCAST] = "BROADCAST",
2342 [RTN_ANYCAST] = "ANYCAST",
2343 [RTN_MULTICAST] = "MULTICAST",
2344 [RTN_BLACKHOLE] = "BLACKHOLE",
2345 [RTN_UNREACHABLE] = "UNREACHABLE",
2346 [RTN_PROHIBIT] = "PROHIBIT",
2347 [RTN_THROW] = "THROW",
2348 [RTN_NAT] = "NAT",
2349 [RTN_XRESOLVE] = "XRESOLVE",
2350};
2351
Eric Dumazeta034ee32010-09-09 23:32:28 +00002352static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002353{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002354 if (t < __RTN_MAX && rtn_type_names[t])
2355 return rtn_type_names[t];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002356 snprintf(buf, len, "type %u", t);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002357 return buf;
2358}
2359
2360/* Pretty print the trie */
Robert Olsson19baf832005-06-21 12:43:18 -07002361static int fib_trie_seq_show(struct seq_file *seq, void *v)
2362{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002363 const struct fib_trie_iter *iter = seq->private;
David S. Millerb299e4f2011-02-02 20:48:10 -08002364 struct rt_trie_node *n = v;
Robert Olsson19baf832005-06-21 12:43:18 -07002365
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002366 if (!node_parent_rcu(n))
2367 fib_table_print(seq, iter->tb);
Robert Olsson095b8502007-01-26 19:06:01 -08002368
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002369 if (IS_TNODE(n)) {
2370 struct tnode *tn = (struct tnode *) n;
Stephen Hemmingerab66b4a2007-08-10 15:22:58 -07002371 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002372
Robert Olsson1d25cd62005-09-19 15:29:52 -07002373 seq_indent(seq, iter->depth-1);
Harvey Harrison673d57e2008-10-31 00:53:57 -07002374 seq_printf(seq, " +-- %pI4/%d %d %d %d\n",
2375 &prf, tn->pos, tn->bits, tn->full_children,
Robert Olsson1d25cd62005-09-19 15:29:52 -07002376 tn->empty_children);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002377
Olof Johansson91b9a272005-08-09 20:24:39 -07002378 } else {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002379 struct leaf *l = (struct leaf *) n;
Stephen Hemminger13280422008-01-22 21:54:37 -08002380 struct leaf_info *li;
2381 struct hlist_node *node;
Al Viro32ab5f82006-09-26 22:21:45 -07002382 __be32 val = htonl(l->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002383
2384 seq_indent(seq, iter->depth);
Harvey Harrison673d57e2008-10-31 00:53:57 -07002385 seq_printf(seq, " |-- %pI4\n", &val);
Eric Dumazet28d36e32008-01-14 23:09:56 -08002386
Stephen Hemminger13280422008-01-22 21:54:37 -08002387 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
2388 struct fib_alias *fa;
Eric Dumazet28d36e32008-01-14 23:09:56 -08002389
Stephen Hemminger13280422008-01-22 21:54:37 -08002390 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2391 char buf1[32], buf2[32];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002392
Stephen Hemminger13280422008-01-22 21:54:37 -08002393 seq_indent(seq, iter->depth+1);
2394 seq_printf(seq, " /%d %s %s", li->plen,
2395 rtn_scope(buf1, sizeof(buf1),
David S. Miller37e826c2011-03-24 18:06:47 -07002396 fa->fa_info->fib_scope),
Stephen Hemminger13280422008-01-22 21:54:37 -08002397 rtn_type(buf2, sizeof(buf2),
2398 fa->fa_type));
2399 if (fa->fa_tos)
Denis V. Lunevb9c4d822008-02-05 02:58:45 -08002400 seq_printf(seq, " tos=%d", fa->fa_tos);
Stephen Hemminger13280422008-01-22 21:54:37 -08002401 seq_putc(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002402 }
2403 }
Robert Olsson19baf832005-06-21 12:43:18 -07002404 }
2405
2406 return 0;
2407}
2408
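/*
 * Output of the pretty printer above looks roughly like this (the
 * addresses are made up and the spacing is approximate):
 *
 *   Main:
 *    +-- 192.168.1.0/24 2 0 2
 *       |-- 192.168.1.0
 *          /24 link UNICAST
 *
 * A "+--" line is an internal tnode: the masked key, the key bit it
 * discriminates on (tn->pos), the number of index bits (tn->bits) and
 * the full/empty child counts.  A "|--" line is a leaf key, followed
 * by one "/plen scope type" line per fib_alias hanging off it.
 */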
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002409static const struct seq_operations fib_trie_seq_ops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002410 .start = fib_trie_seq_start,
2411 .next = fib_trie_seq_next,
2412 .stop = fib_trie_seq_stop,
2413 .show = fib_trie_seq_show,
Robert Olsson19baf832005-06-21 12:43:18 -07002414};
2415
2416static int fib_trie_seq_open(struct inode *inode, struct file *file)
2417{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002418 return seq_open_net(inode, file, &fib_trie_seq_ops,
2419 sizeof(struct fib_trie_iter));
Robert Olsson19baf832005-06-21 12:43:18 -07002420}
2421
Arjan van de Ven9a321442007-02-12 00:55:35 -08002422static const struct file_operations fib_trie_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002423 .owner = THIS_MODULE,
2424 .open = fib_trie_seq_open,
2425 .read = seq_read,
2426 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002427 .release = seq_release_net,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002428};
2429
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002430struct fib_route_iter {
2431 struct seq_net_private p;
2432 struct trie *main_trie;
2433 loff_t pos;
2434 t_key key;
2435};
2436
2437static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
2438{
2439 struct leaf *l = NULL;
2440 struct trie *t = iter->main_trie;
2441
2442	/* use the cached location of the last found key */
2443 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2444 pos -= iter->pos;
2445 else {
2446 iter->pos = 0;
2447 l = trie_firstleaf(t);
2448 }
2449
2450 while (l && pos-- > 0) {
2451 iter->pos++;
2452 l = trie_nextleaf(l);
2453 }
2454
2455 if (l)
2456		iter->key = l->key;	/* remember it */
2457 else
2458 iter->pos = 0; /* forget it */
2459
2460 return l;
2461}
2462
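/*
 * Why fib_route_get_idx() caches iter->pos/iter->key: /proc/net/route
 * is read in page-sized chunks, and every chunk restarts the seq_file
 * at ->start with a fresh *pos.  Without the cache each restart would
 * rescan the main trie from trie_firstleaf(), turning a full dump of N
 * routes into O(N^2) leaf visits.  With it, the walk resumes from the
 * leaf recorded by the previous pass via fib_find_node(), and only
 * falls back to a scan from the first leaf when the cached leaf has
 * disappeared or the reader seeks backwards.
 */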
2463static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2464 __acquires(RCU)
2465{
2466 struct fib_route_iter *iter = seq->private;
2467 struct fib_table *tb;
2468
2469 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002470 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002471 if (!tb)
2472 return NULL;
2473
2474 iter->main_trie = (struct trie *) tb->tb_data;
2475 if (*pos == 0)
2476 return SEQ_START_TOKEN;
2477 else
2478 return fib_route_get_idx(iter, *pos - 1);
2479}
2480
2481static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2482{
2483 struct fib_route_iter *iter = seq->private;
2484 struct leaf *l = v;
2485
2486 ++*pos;
2487 if (v == SEQ_START_TOKEN) {
2488 iter->pos = 0;
2489 l = trie_firstleaf(iter->main_trie);
2490 } else {
2491 iter->pos++;
2492 l = trie_nextleaf(l);
2493 }
2494
2495 if (l)
2496 iter->key = l->key;
2497 else
2498 iter->pos = 0;
2499 return l;
2500}
2501
2502static void fib_route_seq_stop(struct seq_file *seq, void *v)
2503 __releases(RCU)
2504{
2505 rcu_read_unlock();
2506}
2507
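/*
 * The ->start/->next callbacks above follow the usual seq_file
 * pattern: *pos == 0 yields SEQ_START_TOKEN so fib_route_seq_show()
 * can emit the fixed header row, and real positions map to leaves of
 * the MAIN table only -- the same subset the old fib_hash code
 * exported, which keeps /proc/net/route unchanged for existing
 * userspace.  The RCU read lock is held from ->start to ->stop.
 */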
Eric Dumazeta034ee32010-09-09 23:32:28 +00002508static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002509{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002510 unsigned int flags = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002511
Eric Dumazeta034ee32010-09-09 23:32:28 +00002512 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2513 flags = RTF_REJECT;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002514 if (fi && fi->fib_nh->nh_gw)
2515 flags |= RTF_GATEWAY;
Al Viro32ab5f82006-09-26 22:21:45 -07002516 if (mask == htonl(0xFFFFFFFF))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002517 flags |= RTF_HOST;
2518 flags |= RTF_UP;
2519 return flags;
2520}
2521
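/*
 * fib_flag_trans() above builds the RTF_* bitmask shown in the Flags
 * column.  A worked example (flag values from <linux/route.h>): a
 * default route through a gateway is RTF_UP | RTF_GATEWAY =
 * 0x0001 | 0x0002 and prints as "0003"; a directly connected network
 * is just RTF_UP ("0001"); a /32 via a gateway adds RTF_HOST ("0007");
 * and an unreachable/prohibit entry carries RTF_REJECT ("0201").
 */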
2522/*
2523 * This outputs /proc/net/route.
2524 * The format of the file is not supposed to be changed
Eric Dumazeta034ee32010-09-09 23:32:28 +00002525 * and needs to be the same as the fib_hash output to avoid breaking
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002526 * legacy utilities
2527 */
2528static int fib_route_seq_show(struct seq_file *seq, void *v)
2529{
2530 struct leaf *l = v;
Stephen Hemminger13280422008-01-22 21:54:37 -08002531 struct leaf_info *li;
2532 struct hlist_node *node;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002533
2534 if (v == SEQ_START_TOKEN) {
2535 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2536 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2537 "\tWindow\tIRTT");
2538 return 0;
2539 }
2540
Stephen Hemminger13280422008-01-22 21:54:37 -08002541 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002542 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07002543 __be32 mask, prefix;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002544
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002545 mask = inet_make_mask(li->plen);
2546 prefix = htonl(l->key);
2547
2548 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
Herbert Xu1371e372005-10-15 09:42:39 +10002549 const struct fib_info *fi = fa->fa_info;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002550 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002551 int len;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002552
2553 if (fa->fa_type == RTN_BROADCAST
2554 || fa->fa_type == RTN_MULTICAST)
2555 continue;
2556
2557 if (fi)
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002558 seq_printf(seq,
2559 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2560 "%d\t%08X\t%d\t%u\t%u%n",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002561 fi->fib_dev ? fi->fib_dev->name : "*",
2562 prefix,
2563 fi->fib_nh->nh_gw, flags, 0, 0,
2564 fi->fib_priority,
2565 mask,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002566 (fi->fib_advmss ?
2567 fi->fib_advmss + 40 : 0),
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002568 fi->fib_window,
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002569 fi->fib_rtt >> 3, &len);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002570 else
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002571 seq_printf(seq,
2572 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2573 "%d\t%08X\t%d\t%u\t%u%n",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002574 prefix, 0, flags, 0, 0, 0,
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002575 mask, 0, 0, 0, &len);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002576
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002577 seq_printf(seq, "%*s\n", 127 - len, "");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002578 }
2579 }
2580
2581 return 0;
2582}
2583
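/*
 * Illustrative /proc/net/route line produced by the code above, for a
 * default route via 192.168.1.1 on eth0 as seen on a little-endian
 * machine (addresses are raw __be32 values printed with %08X, so they
 * appear byte-swapped relative to dotted-quad notation; each line is
 * padded to 127 characters, and fields are tab-separated in the real
 * file):
 *
 *   eth0   00000000   0101A8C0   0003   0   0   0   00000000   0   0   0
 *
 * Columns: Iface, Destination, Gateway, Flags, RefCnt, Use, Metric,
 * Mask, MTU (advmss + 40 when set), Window, IRTT.
 */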
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002584static const struct seq_operations fib_route_seq_ops = {
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002585 .start = fib_route_seq_start,
2586 .next = fib_route_seq_next,
2587 .stop = fib_route_seq_stop,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002588 .show = fib_route_seq_show,
2589};
2590
2591static int fib_route_seq_open(struct inode *inode, struct file *file)
2592{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002593 return seq_open_net(inode, file, &fib_route_seq_ops,
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002594 sizeof(struct fib_route_iter));
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002595}
2596
Arjan van de Ven9a321442007-02-12 00:55:35 -08002597static const struct file_operations fib_route_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002598 .owner = THIS_MODULE,
2599 .open = fib_route_seq_open,
2600 .read = seq_read,
2601 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002602 .release = seq_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002603};
2604
Denis V. Lunev61a02652008-01-10 03:21:09 -08002605int __net_init fib_proc_init(struct net *net)
Robert Olsson19baf832005-06-21 12:43:18 -07002606{
Denis V. Lunev61a02652008-01-10 03:21:09 -08002607 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002608 goto out1;
2609
Denis V. Lunev61a02652008-01-10 03:21:09 -08002610 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2611 &fib_triestat_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002612 goto out2;
2613
Denis V. Lunev61a02652008-01-10 03:21:09 -08002614 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002615 goto out3;
2616
Robert Olsson19baf832005-06-21 12:43:18 -07002617 return 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002618
2619out3:
Denis V. Lunev61a02652008-01-10 03:21:09 -08002620 proc_net_remove(net, "fib_triestat");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002621out2:
Denis V. Lunev61a02652008-01-10 03:21:09 -08002622 proc_net_remove(net, "fib_trie");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002623out1:
2624 return -ENOMEM;
Robert Olsson19baf832005-06-21 12:43:18 -07002625}
2626
Denis V. Lunev61a02652008-01-10 03:21:09 -08002627void __net_exit fib_proc_exit(struct net *net)
Robert Olsson19baf832005-06-21 12:43:18 -07002628{
Denis V. Lunev61a02652008-01-10 03:21:09 -08002629 proc_net_remove(net, "fib_trie");
2630 proc_net_remove(net, "fib_triestat");
2631 proc_net_remove(net, "route");
Robert Olsson19baf832005-06-21 12:43:18 -07002632}
2633
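/*
 * fib_proc_init()/fib_proc_exit() above are the hooks run when a
 * network namespace is set up or torn down; they create and remove the
 * three files -- /proc/net/fib_trie, /proc/net/fib_triestat and
 * /proc/net/route -- all registered read-only (S_IRUGO).  Note the
 * error unwinding in fib_proc_init(): each failure label removes only
 * the files already registered, in reverse order of creation.
 */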
2634#endif /* CONFIG_PROC_FS */