/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)

#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)

struct tnode {
	t_key key;
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	struct tnode __rcu *parent;
	struct rcu_head rcu;
	union {
		/* The fields in this struct are valid if bits > 0 (TNODE) */
		struct {
			unsigned int full_children;  /* KEYLENGTH bits needed */
			unsigned int empty_children; /* KEYLENGTH bits needed */
			struct tnode __rcu *child[0];
		};
		/* This list pointer is valid if bits == 0 (LEAF) */
		struct hlist_head list;
	};
};

struct leaf_info {
	struct hlist_node hlist;
	int plen;
	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
	struct list_head falh;
	struct rcu_head rcu;
};

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct tnode __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static void tnode_put_child_reorg(struct tnode *tn, unsigned long i,
				  struct tnode *n, int wasfull);
static struct tnode *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct callback_head *tnode_free_head;
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/* caller must hold RTNL */
#define node_parent(n) rtnl_dereference((n)->parent)

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct tnode *n, struct tnode *tp)
{
	if (n)
		rcu_assign_pointer(n->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)

/* This provides us with the number of children in this node, in the case of a
 * leaf this will return 0 meaning none of the children are accessible.
 */
static inline unsigned long tnode_child_length(const struct tnode *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}
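
/* For illustration: leaves are encoded with bits == 0, so the expression
 * above needs no explicit IS_LEAF() check.  Taking bits == 4 as an
 * arbitrary example value for a tnode:
 *
 *	leaf:	(1ul << 0) & ~1ul = 1 & ~1 = 0	(no accessible children)
 *	tnode:	(1ul << 4) & ~1ul = 16		(2^bits child slots)
 */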

/* caller must hold RTNL */
static inline struct tnode *tnode_get_child(const struct tnode *tn,
					    unsigned long i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rtnl_dereference(tn->child[i]);
}

/* caller must hold RCU read lock or RTNL */
static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
						unsigned long i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rcu_dereference_rtnl(tn->child[i]);
}

/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * if n is an internal node - a 'tnode' here, the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
 * at this point.
 */
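
/* Worked example, using the illustrative numbers above (tp->pos = 22,
 * tp->bits = 3): get_index(key, tp) recovers the "N" bits that select n
 * in tp's child array.  tp->key stores the prefix with everything below
 * (tp->pos + tp->bits) cleared, so for a key that matches the prefix
 *
 *	index = (key ^ tp->key) >> tp->pos;	// bits 24..22, i.e. 0..7
 *
 * yields a value below (1 << tp->bits).  If the key does *not* match the
 * prefix, some bit at or above (tp->pos + tp->bits) survives the XOR and
 * (index >> tp->bits) becomes non-zero - this is the mismatch test used
 * by fib_find_node() and fib_insert_node() below.
 */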

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

#define TNODE_KMALLOC_MAX \
	ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))

static void __node_free_rcu(struct rcu_head *head)
{
	struct tnode *n = container_of(head, struct tnode, rcu);

	if (IS_LEAF(n))
		kmem_cache_free(trie_leaf_kmem, n);
	else if (n->bits <= TNODE_KMALLOC_MAX)
		kfree(n);
	else
		vfree(n);
}

#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)

static inline void free_leaf_info(struct leaf_info *leaf)
{
	kfree_rcu(leaf, rcu);
}

static struct tnode *tnode_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->rcu.next = tnode_free_head;
	tnode_free_head = &tn->rcu;
}

static void tnode_free_flush(void)
{
	struct callback_head *head;

	while ((head = tnode_free_head)) {
		struct tnode *tn = container_of(head, struct tnode, rcu);

		tnode_free_head = head->next;
		tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);

		node_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}
339
Alexander Duyckadaf9812014-12-31 10:55:47 -0800340static struct tnode *leaf_new(t_key key)
Robert Olsson19baf832005-06-21 12:43:18 -0700341{
Alexander Duyckadaf9812014-12-31 10:55:47 -0800342 struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -0700343 if (l) {
Alexander Duyck64c9b6f2014-12-31 10:55:35 -0800344 l->parent = NULL;
345 /* set key and pos to reflect full key value
346 * any trailing zeros in the key should be ignored
347 * as the nodes are searched
348 */
349 l->key = key;
Alexander Duycke9b44012014-12-31 10:56:12 -0800350 l->pos = 0;
Alexander Duyck64c9b6f2014-12-31 10:55:35 -0800351 /* set bits to 0 indicating we are not a tnode */
352 l->bits = 0;
353
Robert Olsson19baf832005-06-21 12:43:18 -0700354 INIT_HLIST_HEAD(&l->list);
355 }
356 return l;
357}
358
359static struct leaf_info *leaf_info_new(int plen)
360{
361 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -0700362 if (li) {
363 li->plen = plen;
Eric Dumazet5c745012011-07-18 03:16:33 +0000364 li->mask_plen = ntohl(inet_make_mask(plen));
Robert Olsson2373ce12005-08-25 13:01:29 -0700365 INIT_LIST_HEAD(&li->falh);
Patrick McHardyf0e36f82005-07-05 14:44:55 -0700366 }
Robert Olsson2373ce12005-08-25 13:01:29 -0700367 return li;
Patrick McHardyf0e36f82005-07-05 14:44:55 -0700368}
369
Stephen Hemmingera07f5f52008-01-22 21:53:36 -0800370static struct tnode *tnode_new(t_key key, int pos, int bits)
Robert Olsson19baf832005-06-21 12:43:18 -0700371{
Alexander Duyck37fd30f2014-12-31 10:55:41 -0800372 size_t sz = offsetof(struct tnode, child[1 << bits]);
Patrick McHardyf0e36f82005-07-05 14:44:55 -0700373 struct tnode *tn = tnode_alloc(sz);
Alexander Duyck64c9b6f2014-12-31 10:55:35 -0800374 unsigned int shift = pos + bits;
375
376 /* verify bits and pos their msb bits clear and values are valid */
377 BUG_ON(!bits || (shift > KEYLENGTH));
Robert Olsson19baf832005-06-21 12:43:18 -0700378
Olof Johansson91b9a272005-08-09 20:24:39 -0700379 if (tn) {
Alexander Duyck64c9b6f2014-12-31 10:55:35 -0800380 tn->parent = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -0700381 tn->pos = pos;
382 tn->bits = bits;
Alexander Duycke9b44012014-12-31 10:56:12 -0800383 tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
Robert Olsson19baf832005-06-21 12:43:18 -0700384 tn->full_children = 0;
385 tn->empty_children = 1<<bits;
386 }
Stephen Hemmingerc877efb2005-07-19 14:01:51 -0700387
Eric Dumazeta034ee32010-09-09 23:32:28 +0000388 pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
Alexander Duyckadaf9812014-12-31 10:55:47 -0800389 sizeof(struct tnode *) << bits);
Robert Olsson19baf832005-06-21 12:43:18 -0700390 return tn;
391}
392
Alexander Duycke9b44012014-12-31 10:56:12 -0800393/* Check whether a tnode 'n' is "full", i.e. it is an internal node
Robert Olsson19baf832005-06-21 12:43:18 -0700394 * and no bits are skipped. See discussion in dyntree paper p. 6
395 */
Alexander Duyckadaf9812014-12-31 10:55:47 -0800396static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
Robert Olsson19baf832005-06-21 12:43:18 -0700397{
Alexander Duycke9b44012014-12-31 10:56:12 -0800398 return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
Robert Olsson19baf832005-06-21 12:43:18 -0700399}
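
/* Illustration (example values only, nothing here is read by the code):
 * with a parent tn where tn->pos == 22 and a child n where n->pos == 20
 * and n->bits == 2, (n->pos + n->bits) == 22 == tn->pos, so no bits are
 * skipped between the parent's index bits and the child's own index
 * bits.  Such children are the ones counted in full_children and the
 * ones inflate() is allowed to merge into a doubled parent.
 */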

static inline void put_child(struct tnode *tn, unsigned long i,
			     struct tnode *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}

/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void tnode_put_child_reorg(struct tnode *tn, unsigned long i,
				  struct tnode *n, int wasfull)
{
	struct tnode *chi = rtnl_dereference(tn->child[i]);
	int isfull;

	BUG_ON(i >= tnode_child_length(tn));

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}

static void put_child_root(struct tnode *tp, struct trie *t,
			   t_key key, struct tnode *n)
{
	if (tp)
		put_child(tp, get_index(key, tp), n);
	else
		rcu_assign_pointer(t->trie, n);
}

#define MAX_WORK 10
static struct tnode *resize(struct trie *t, struct tnode *tn)
{
	struct tnode *old_tn, *n = NULL;
	int inflate_threshold_use;
	int halve_threshold_use;
	int max_work;

	if (!tn)
		return NULL;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children > (tnode_child_length(tn) - 1))
		goto no_children;

	/* One child */
	if (tn->empty_children == (tnode_child_length(tn) - 1))
		goto one_child;
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */

	/*
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it with
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *      new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *
	 * ...and so on, tho it would mess up the while () loop.
	 *
	 * anyway,
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 * inflate_threshold
	 *
	 * avoid a division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 * inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *   tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *   tn->full_children) >=
	 *      inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *    tn->empty_children) >= inflate_threshold *
	 *    tnode_child_length(tn)
	 *
	 */
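
	/* A concrete example of the final inequality above (numbers chosen
	 * purely for illustration): a tnode with bits = 4 has 16 child
	 * slots.  Suppose 12 are non-empty (empty_children = 4) and 6 of
	 * those are "full".  With inflate_threshold_use = 50,
	 *
	 *	50 * (6 + 16 - 4) = 900 >= 50 * 16 = 800
	 *
	 * so the node is inflated: the doubled node has 32 slots holding
	 * 6 * 2 + 6 = 18 non-empty children, a fill factor of ~56%, which
	 * is indeed at least the threshold of 50%.
	 */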

	/* Keep root node larger */

	if (!node_parent(tn)) {
		inflate_threshold_use = inflate_threshold_root;
		halve_threshold_use = halve_threshold_root;
	} else {
		inflate_threshold_use = inflate_threshold;
		halve_threshold_use = halve_threshold;
	}

	max_work = MAX_WORK;
	while ((tn->full_children > 0 && max_work-- &&
		50 * (tn->full_children + tnode_child_length(tn)
		      - tn->empty_children)
		>= inflate_threshold_use * tnode_child_length(tn))) {

		old_tn = tn;
		tn = inflate(t, tn);

		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}
	}

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return tn;

	/*
	 * Halve as long as the number of empty children in this
	 * node is above threshold.
	 */

	max_work = MAX_WORK;
	while (tn->bits > 1 && max_work-- &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       halve_threshold_use * tnode_child_length(tn)) {

		old_tn = tn;
		tn = halve(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}
	}

	/* Only one child remains */
	if (tn->empty_children == (tnode_child_length(tn) - 1)) {
		unsigned long i;
one_child:
		for (i = tnode_child_length(tn); !n && i;)
			n = tnode_get_child(tn, --i);
no_children:
		/* compress one level */
		node_set_parent(n, NULL);
		tnode_free_safe(tn);
		return n;
	}
	return tn;
}

static void tnode_clean_free(struct tnode *tn)
{
	struct tnode *tofree;
	unsigned long i;

	for (i = 0; i < tnode_child_length(tn); i++) {
		tofree = tnode_get_child(tn, i);
		if (tofree)
			node_free(tofree);
	}
	node_free(tn);
}

static struct tnode *inflate(struct trie *t, struct tnode *oldtnode)
{
	unsigned long olen = tnode_child_length(oldtnode);
	struct tnode *tn;
	unsigned long i;
	t_key m;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */
	for (i = 0, m = 1u << tn->pos; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);

		if (tnode_full(oldtnode, inode) && (inode->bits > 1)) {
			struct tnode *left, *right;

			left = tnode_new(inode->key & ~m, inode->pos,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key | m, inode->pos,
					  inode->bits - 1);

			if (!right) {
				node_free(left);
				goto nomem;
			}

			put_child(tn, 2*i, left);
			put_child(tn, 2*i+1, right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		unsigned long size, j;

		/* An empty child */
		if (inode == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
			put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		left = tnode_get_child(tn, 2*i);
		put_child(tn, 2*i, NULL);

		BUG_ON(!left);

		right = tnode_get_child(tn, 2*i+1);
		put_child(tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(left, j, rtnl_dereference(inode->child[j]));
			put_child(right, j, rtnl_dereference(inode->child[j + size]));
		}
		put_child(tn, 2*i, resize(t, left));
		put_child(tn, 2*i+1, resize(t, right));

		tnode_free_safe(inode);
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

static struct tnode *halve(struct trie *t, struct tnode *oldtnode)
{
	unsigned long olen = tnode_child_length(oldtnode);
	struct tnode *tn, *left, *right;
	int i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, oldtnode->pos, 1);

			if (!newn)
				goto nomem;

			put_child(tn, i/2, newn);
		}
	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = tnode_get_child(tn, i/2);
		put_child(tn, i/2, NULL);
		put_child(newBinNode, 0, left);
		put_child(newBinNode, 1, right);
		put_child(tn, i/2, resize(t, newBinNode));
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

/* readside must use rcu_read_lock: currently the dump routines do,
 * via get_fa_head and dump */

static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct tnode *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_behind_rcu(&new->hlist, &last->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}

/* rcu_read_lock needs to be held by caller from readside */
static struct tnode *fib_find_node(struct trie *t, u32 key)
{
	struct tnode *n = rcu_dereference_rtnl(t->trie);

	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check.  The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value.  From
		 * this we can actually derive several pieces of data.
		 *   if !(index >> bits)
		 *     we know the value is cindex
		 *   else
		 *     we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			return NULL;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			break;

		n = rcu_dereference_rtnl(n->child[index]);
	}

	return n;
}

static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp;

	key = tn->key;

	while (tn != NULL && (tp = node_parent(tn)) != NULL) {
		cindex = get_index(key, tp);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = resize(t, tn);

		tnode_put_child_reorg(tp, cindex, tn, wasfull);

		tp = node_parent(tn);
		if (!tp)
			rcu_assign_pointer(t->trie, tn);

		tnode_free_flush();
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = resize(t, tn);

	rcu_assign_pointer(t->trie, tn);
	tnode_free_flush();
}

/* only used from updater-side */

static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	struct list_head *fa_head = NULL;
	struct tnode *l, *n, *tp = NULL;
	struct leaf_info *li;

	li = leaf_info_new(plen);
	if (!li)
		return NULL;
	fa_head = &li->falh;

	n = rtnl_dereference(t->trie);

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 *
	 * If we hit a node with a key that doesn't match then we should stop
	 * and create a new tnode to replace that node and insert ourselves
	 * and the other node into the new tnode.
	 */
	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check.  The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value.  From
		 * this we can actually derive several pieces of data.
		 *   if !(index >> bits)
		 *     we know the value is child index
		 *   else
		 *     we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			break;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n)) {
			/* Case 1: n is a leaf, and prefixes match */
			insert_leaf_info(&n->list, li);
			return fa_head;
		}

		tp = n;
		n = rcu_dereference_rtnl(n->child[index]);
	}

	l = leaf_new(key);
	if (!l) {
		free_leaf_info(li);
		return NULL;
	}

	insert_leaf_info(&l->list, li);

	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 *  Add a new tnode here
	 *  first tnode needs some special handling
	 *  leaves us in position for handling as case 3
	 */
	if (n) {
		struct tnode *tn;

		tn = tnode_new(key, __fls(key ^ n->key), 1);
		if (!tn) {
			free_leaf_info(li);
			node_free(l);
			return NULL;
		}

		/* initialize routes out of node */
		NODE_INIT_PARENT(tn, tp);
		put_child(tn, get_index(key, tn) ^ 1, n);

		/* start adding routes into the node */
		put_child_root(tp, t, key, tn);
		node_set_parent(n, tn);

		/* parent now has a NULL spot where the leaf can go */
		tp = tn;
	}

	/* Case 3: n is NULL, and will just insert a new leaf */
	if (tp) {
		NODE_INIT_PARENT(l, tp);
		put_child(tp, get_index(key, tp), l);
		trie_rebalance(t, tp);
	} else {
		rcu_assign_pointer(t->trie, l);
	}

	return fa_head;
}
1013
Robert Olssond562f1f2007-03-26 14:22:22 -07001014/*
1015 * Caller must hold RTNL.
1016 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001017int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001018{
1019 struct trie *t = (struct trie *) tb->tb_data;
1020 struct fib_alias *fa, *new_fa;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001021 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001022 struct fib_info *fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001023 int plen = cfg->fc_dst_len;
1024 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001025 u32 key, mask;
1026 int err;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001027 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001028
1029 if (plen > 32)
1030 return -EINVAL;
1031
Thomas Graf4e902c52006-08-17 18:14:52 -07001032 key = ntohl(cfg->fc_dst);
Robert Olsson19baf832005-06-21 12:43:18 -07001033
Patrick McHardy2dfe55b2006-08-10 23:08:33 -07001034 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001035
Olof Johansson91b9a272005-08-09 20:24:39 -07001036 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001037
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001038 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001039 return -EINVAL;
1040
1041 key = key & mask;
1042
Thomas Graf4e902c52006-08-17 18:14:52 -07001043 fi = fib_create_info(cfg);
1044 if (IS_ERR(fi)) {
1045 err = PTR_ERR(fi);
Robert Olsson19baf832005-06-21 12:43:18 -07001046 goto err;
Thomas Graf4e902c52006-08-17 18:14:52 -07001047 }
Robert Olsson19baf832005-06-21 12:43:18 -07001048
1049 l = fib_find_node(t, key);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001050 fa = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001051
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001052 if (l) {
Robert Olsson19baf832005-06-21 12:43:18 -07001053 fa_head = get_fa_head(l, plen);
1054 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1055 }
1056
1057 /* Now fa, if non-NULL, points to the first fib alias
1058 * with the same keys [prefix,tos,priority], if such key already
1059 * exists or to the node before which we will insert new one.
1060 *
1061 * If fa is NULL, we will need to allocate a new one and
1062 * insert to the head of f.
1063 *
1064 * If f is NULL, no fib node matched the destination key
1065 * and we need to allocate a new one of those as well.
1066 */
1067
Julian Anastasov936f6f82008-01-28 21:18:06 -08001068 if (fa && fa->fa_tos == tos &&
1069 fa->fa_info->fib_priority == fi->fib_priority) {
1070 struct fib_alias *fa_first, *fa_match;
Robert Olsson19baf832005-06-21 12:43:18 -07001071
1072 err = -EEXIST;
Thomas Graf4e902c52006-08-17 18:14:52 -07001073 if (cfg->fc_nlflags & NLM_F_EXCL)
Robert Olsson19baf832005-06-21 12:43:18 -07001074 goto out;
1075
Julian Anastasov936f6f82008-01-28 21:18:06 -08001076 /* We have 2 goals:
1077 * 1. Find exact match for type, scope, fib_info to avoid
1078 * duplicate routes
1079 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1080 */
1081 fa_match = NULL;
1082 fa_first = fa;
1083 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1084 list_for_each_entry_continue(fa, fa_head, fa_list) {
1085 if (fa->fa_tos != tos)
1086 break;
1087 if (fa->fa_info->fib_priority != fi->fib_priority)
1088 break;
1089 if (fa->fa_type == cfg->fc_type &&
Julian Anastasov936f6f82008-01-28 21:18:06 -08001090 fa->fa_info == fi) {
1091 fa_match = fa;
1092 break;
1093 }
1094 }
1095
Thomas Graf4e902c52006-08-17 18:14:52 -07001096 if (cfg->fc_nlflags & NLM_F_REPLACE) {
Robert Olsson19baf832005-06-21 12:43:18 -07001097 struct fib_info *fi_drop;
1098 u8 state;
1099
Julian Anastasov936f6f82008-01-28 21:18:06 -08001100 fa = fa_first;
1101 if (fa_match) {
1102 if (fa == fa_match)
1103 err = 0;
Joonwoo Park67250332008-01-18 03:45:18 -08001104 goto out;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001105 }
Robert Olsson2373ce12005-08-25 13:01:29 -07001106 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001107 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -07001108 if (new_fa == NULL)
1109 goto out;
Robert Olsson19baf832005-06-21 12:43:18 -07001110
1111 fi_drop = fa->fa_info;
Robert Olsson2373ce12005-08-25 13:01:29 -07001112 new_fa->fa_tos = fa->fa_tos;
1113 new_fa->fa_info = fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001114 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001115 state = fa->fa_state;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001116 new_fa->fa_state = state & ~FA_S_ACCESSED;
Robert Olsson19baf832005-06-21 12:43:18 -07001117
Robert Olsson2373ce12005-08-25 13:01:29 -07001118 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1119 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001120
1121 fib_release_info(fi_drop);
1122 if (state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001123 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Milan Kocianb8f55832007-05-23 14:55:06 -07001124 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1125 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
Robert Olsson19baf832005-06-21 12:43:18 -07001126
Olof Johansson91b9a272005-08-09 20:24:39 -07001127 goto succeeded;
Robert Olsson19baf832005-06-21 12:43:18 -07001128 }
1129 /* Error if we find a perfect match which
1130 * uses the same scope, type, and nexthop
1131 * information.
1132 */
Julian Anastasov936f6f82008-01-28 21:18:06 -08001133 if (fa_match)
1134 goto out;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001135
Thomas Graf4e902c52006-08-17 18:14:52 -07001136 if (!(cfg->fc_nlflags & NLM_F_APPEND))
Julian Anastasov936f6f82008-01-28 21:18:06 -08001137 fa = fa_first;
Robert Olsson19baf832005-06-21 12:43:18 -07001138 }
1139 err = -ENOENT;
Thomas Graf4e902c52006-08-17 18:14:52 -07001140 if (!(cfg->fc_nlflags & NLM_F_CREATE))
Robert Olsson19baf832005-06-21 12:43:18 -07001141 goto out;
1142
1143 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001144 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson19baf832005-06-21 12:43:18 -07001145 if (new_fa == NULL)
1146 goto out;
1147
1148 new_fa->fa_info = fi;
1149 new_fa->fa_tos = tos;
Thomas Graf4e902c52006-08-17 18:14:52 -07001150 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001151 new_fa->fa_state = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001152 /*
1153	 * Insert the new entry into the list.
1154 */
1155
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001156 if (!fa_head) {
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001157 fa_head = fib_insert_node(t, key, plen);
1158 if (unlikely(!fa_head)) {
1159 err = -ENOMEM;
Robert Olssonf835e472005-06-28 15:00:39 -07001160 goto out_free_new_fa;
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001161 }
Robert Olssonf835e472005-06-28 15:00:39 -07001162 }
Robert Olsson19baf832005-06-21 12:43:18 -07001163
David S. Miller21d8c492011-04-14 14:49:37 -07001164 if (!plen)
1165 tb->tb_num_default++;
1166
Robert Olsson2373ce12005-08-25 13:01:29 -07001167 list_add_tail_rcu(&new_fa->fa_list,
1168 (fa ? &fa->fa_list : fa_head));
Robert Olsson19baf832005-06-21 12:43:18 -07001169
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001170 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Thomas Graf4e902c52006-08-17 18:14:52 -07001171 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001172 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001173succeeded:
1174 return 0;
Robert Olssonf835e472005-06-28 15:00:39 -07001175
1176out_free_new_fa:
1177 kmem_cache_free(fn_alias_kmem, new_fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001178out:
1179 fib_release_info(fi);
Olof Johansson91b9a272005-08-09 20:24:39 -07001180err:
Robert Olsson19baf832005-06-21 12:43:18 -07001181 return err;
1182}
1183
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001184static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1185{
1186 t_key prefix = n->key;
1187
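	/* (prefix | -prefix) is a mask covering the least significant set
	 * bit of the prefix and every bit above it, so only key bits that
	 * differ in that region count as a mismatch.
	 */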
1188 return (key ^ prefix) & (prefix | -prefix);
1189}
1190
Alexander Duyck345e9b52014-12-31 10:56:24 -08001191/* should be called with rcu_read_lock */
David S. Miller22bd5b92011-03-11 19:54:08 -05001192int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001193 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001194{
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001195 struct trie *t = (struct trie *)tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001196#ifdef CONFIG_IP_FIB_TRIE_STATS
1197 struct trie_use_stats __percpu *stats = t->stats;
1198#endif
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001199 const t_key key = ntohl(flp->daddr);
1200 struct tnode *n, *pn;
Alexander Duyck345e9b52014-12-31 10:56:24 -08001201 struct leaf_info *li;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001202 t_key cindex;
Robert Olsson19baf832005-06-21 12:43:18 -07001203
Robert Olsson2373ce12005-08-25 13:01:29 -07001204 n = rcu_dereference(t->trie);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001205 if (!n)
Alexander Duyck345e9b52014-12-31 10:56:24 -08001206 return -EAGAIN;
Robert Olsson19baf832005-06-21 12:43:18 -07001207
1208#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001209 this_cpu_inc(stats->gets);
Robert Olsson19baf832005-06-21 12:43:18 -07001210#endif
1211
Alexander Duyckadaf9812014-12-31 10:55:47 -08001212 pn = n;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001213 cindex = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001214
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001215 /* Step 1: Travel to the longest prefix match in the trie */
1216 for (;;) {
1217 unsigned long index = get_index(key, n);
Robert Olsson19baf832005-06-21 12:43:18 -07001218
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001219 /* This bit of code is a bit tricky but it combines multiple
1220 * checks into a single check. The prefix consists of the
1221 * prefix plus zeros for the "bits" in the prefix. The index
1222 * is the difference between the key and this value. From
1223 * this we can actually derive several pieces of data.
1224 * if !(index >> bits)
1225 * we know the value is child index
1226 * else
1227 * we have a mismatch in skip bits and failed
1228 */
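		/* e.g. with bits == 4 the valid child indexes are 0..15; an
		 * index of 16 or more means some bit above those 4 differs,
		 * i.e. the mismatch is in the skipped bits.
		 */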
1229 if (index >> n->bits)
1230 break;
Robert Olsson19baf832005-06-21 12:43:18 -07001231
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001232 /* we have found a leaf. Prefixes have already been compared */
1233 if (IS_LEAF(n))
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001234 goto found;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001235
1236 /* only record pn and cindex if we are going to be chopping
1237 * bits later. Otherwise we are just wasting cycles.
1238 */
1239 if (index) {
1240 pn = n;
1241 cindex = index;
Olof Johansson91b9a272005-08-09 20:24:39 -07001242 }
1243
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001244 n = rcu_dereference(n->child[index]);
1245 if (unlikely(!n))
Robert Olsson19baf832005-06-21 12:43:18 -07001246 goto backtrace;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001247 }
1248
1249 /* Step 2: Sort out leaves and begin backtracing for longest prefix */
1250 for (;;) {
1251 /* record the pointer where our next node pointer is stored */
1252 struct tnode __rcu **cptr = n->child;
1253
1254 /* This test verifies that none of the bits that differ
1255 * between the key and the prefix exist in the region of
1256 * the lsb and higher in the prefix.
1257 */
1258 if (unlikely(prefix_mismatch(key, n)))
1259 goto backtrace;
1260
1261 /* exit out and process leaf */
1262 if (unlikely(IS_LEAF(n)))
1263 break;
1264
1265 /* Don't bother recording parent info. Since we are in
1266 * prefix match mode we will have to come back to wherever
1267 * we started this traversal anyway
1268 */
1269
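		/* Note: the "backtrace" label below is the first statement of
		 * this loop body; a goto backtrace enters the body directly,
		 * ascends/strips bits from cindex, and then the loop condition
		 * fetches the next candidate child, repeating while it is NULL.
		 */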
1270 while ((n = rcu_dereference(*cptr)) == NULL) {
1271backtrace:
1272#ifdef CONFIG_IP_FIB_TRIE_STATS
1273 if (!n)
1274 this_cpu_inc(stats->null_node_hit);
1275#endif
1276 /* If we are at cindex 0 there are no more bits for
1277 * us to strip at this level so we must ascend back
1278 * up one level to see if there are any more bits to
1279 * be stripped there.
1280 */
1281 while (!cindex) {
1282 t_key pkey = pn->key;
1283
1284 pn = node_parent_rcu(pn);
1285 if (unlikely(!pn))
Alexander Duyck345e9b52014-12-31 10:56:24 -08001286 return -EAGAIN;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001287#ifdef CONFIG_IP_FIB_TRIE_STATS
1288 this_cpu_inc(stats->backtrack);
1289#endif
1290 /* Get Child's index */
1291 cindex = get_index(pkey, pn);
1292 }
1293
1294 /* strip the least significant bit from the cindex */
1295 cindex &= cindex - 1;
1296
1297 /* grab pointer for next child node */
1298 cptr = &pn->child[cindex];
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001299 }
Robert Olsson19baf832005-06-21 12:43:18 -07001300 }
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001301
Robert Olsson19baf832005-06-21 12:43:18 -07001302found:
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001303	/* Step 3: Process the leaf; if that fails, fall back to backtracing */
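	/* Each leaf_info on the leaf's list holds the aliases for one prefix
	 * length; li->mask_plen (assumed here to be the netmask for li->plen)
	 * lets a single AND decide whether the destination matches that
	 * prefix before its aliases are examined.
	 */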
Alexander Duyck345e9b52014-12-31 10:56:24 -08001304 hlist_for_each_entry_rcu(li, &n->list, hlist) {
1305 struct fib_alias *fa;
1306
1307 if ((key ^ n->key) & li->mask_plen)
1308 continue;
1309
1310 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1311 struct fib_info *fi = fa->fa_info;
1312 int nhsel, err;
1313
1314 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1315 continue;
1316 if (fi->fib_dead)
1317 continue;
1318 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1319 continue;
1320 fib_alias_accessed(fa);
1321 err = fib_props[fa->fa_type].error;
1322 if (unlikely(err < 0)) {
1323#ifdef CONFIG_IP_FIB_TRIE_STATS
1324 this_cpu_inc(stats->semantic_match_passed);
1325#endif
1326 return err;
1327 }
1328 if (fi->fib_flags & RTNH_F_DEAD)
1329 continue;
1330 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1331 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1332
1333 if (nh->nh_flags & RTNH_F_DEAD)
1334 continue;
1335 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
1336 continue;
1337
1338 if (!(fib_flags & FIB_LOOKUP_NOREF))
1339 atomic_inc(&fi->fib_clntref);
1340
1341 res->prefixlen = li->plen;
1342 res->nh_sel = nhsel;
1343 res->type = fa->fa_type;
1344 res->scope = fi->fib_scope;
1345 res->fi = fi;
1346 res->table = tb;
1347 res->fa_head = &li->falh;
1348#ifdef CONFIG_IP_FIB_TRIE_STATS
1349 this_cpu_inc(stats->semantic_match_passed);
1350#endif
1351 return err;
1352 }
1353 }
1354
1355#ifdef CONFIG_IP_FIB_TRIE_STATS
1356 this_cpu_inc(stats->semantic_match_miss);
1357#endif
1358 }
1359 goto backtrace;
Robert Olsson19baf832005-06-21 12:43:18 -07001360}
Florian Westphal6fc01432011-08-25 13:46:12 +02001361EXPORT_SYMBOL_GPL(fib_table_lookup);
Robert Olsson19baf832005-06-21 12:43:18 -07001362
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001363/*
1364 * Remove the leaf and rebalance the trie from its parent.
1365 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001366static void trie_leaf_remove(struct trie *t, struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001367{
Alexander Duyck64c9b6f2014-12-31 10:55:35 -08001368 struct tnode *tp = node_parent(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001369
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001370 pr_debug("entering trie_leaf_remove(%p)\n", l);
Robert Olsson19baf832005-06-21 12:43:18 -07001371
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001372 if (tp) {
Alexander Duyck836a0122014-12-31 10:56:06 -08001373 put_child(tp, get_index(l->key, tp), NULL);
Jarek Poplawski7b855762009-06-18 00:28:51 -07001374 trie_rebalance(t, tp);
Alexander Duyck836a0122014-12-31 10:56:06 -08001375 } else {
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001376 RCU_INIT_POINTER(t->trie, NULL);
Alexander Duyck836a0122014-12-31 10:56:06 -08001377 }
Robert Olsson19baf832005-06-21 12:43:18 -07001378
Alexander Duyck37fd30f2014-12-31 10:55:41 -08001379 node_free(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001380}
1381
Robert Olssond562f1f2007-03-26 14:22:22 -07001382/*
1383 * Caller must hold RTNL.
1384 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001385int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001386{
1387 struct trie *t = (struct trie *) tb->tb_data;
1388 u32 key, mask;
Thomas Graf4e902c52006-08-17 18:14:52 -07001389 int plen = cfg->fc_dst_len;
1390 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001391 struct fib_alias *fa, *fa_to_delete;
1392 struct list_head *fa_head;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001393 struct tnode *l;
Olof Johansson91b9a272005-08-09 20:24:39 -07001394 struct leaf_info *li;
1395
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001396 if (plen > 32)
Robert Olsson19baf832005-06-21 12:43:18 -07001397 return -EINVAL;
1398
Thomas Graf4e902c52006-08-17 18:14:52 -07001399 key = ntohl(cfg->fc_dst);
Olof Johansson91b9a272005-08-09 20:24:39 -07001400 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001401
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001402 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001403 return -EINVAL;
1404
1405 key = key & mask;
1406 l = fib_find_node(t, key);
1407
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001408 if (!l)
Robert Olsson19baf832005-06-21 12:43:18 -07001409 return -ESRCH;
1410
Igor Maravicad5b3102012-08-13 10:26:08 +02001411 li = find_leaf_info(l, plen);
1412
1413 if (!li)
1414 return -ESRCH;
1415
1416 fa_head = &li->falh;
Robert Olsson19baf832005-06-21 12:43:18 -07001417 fa = fib_find_alias(fa_head, tos, 0);
1418
1419 if (!fa)
1420 return -ESRCH;
1421
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001422 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
Robert Olsson19baf832005-06-21 12:43:18 -07001423
1424 fa_to_delete = NULL;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001425 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1426 list_for_each_entry_continue(fa, fa_head, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001427 struct fib_info *fi = fa->fa_info;
1428
1429 if (fa->fa_tos != tos)
1430 break;
1431
Thomas Graf4e902c52006-08-17 18:14:52 -07001432 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1433 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
David S. Miller37e826c2011-03-24 18:06:47 -07001434 fa->fa_info->fib_scope == cfg->fc_scope) &&
Julian Anastasov74cb3c12011-03-19 12:13:46 +00001435 (!cfg->fc_prefsrc ||
1436 fi->fib_prefsrc == cfg->fc_prefsrc) &&
Thomas Graf4e902c52006-08-17 18:14:52 -07001437 (!cfg->fc_protocol ||
1438 fi->fib_protocol == cfg->fc_protocol) &&
1439 fib_nh_match(cfg, fi) == 0) {
Robert Olsson19baf832005-06-21 12:43:18 -07001440 fa_to_delete = fa;
1441 break;
1442 }
1443 }
1444
Olof Johansson91b9a272005-08-09 20:24:39 -07001445 if (!fa_to_delete)
1446 return -ESRCH;
Robert Olsson19baf832005-06-21 12:43:18 -07001447
Olof Johansson91b9a272005-08-09 20:24:39 -07001448 fa = fa_to_delete;
Thomas Graf4e902c52006-08-17 18:14:52 -07001449 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001450 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001451
Robert Olsson2373ce12005-08-25 13:01:29 -07001452 list_del_rcu(&fa->fa_list);
Robert Olsson19baf832005-06-21 12:43:18 -07001453
David S. Miller21d8c492011-04-14 14:49:37 -07001454 if (!plen)
1455 tb->tb_num_default--;
1456
Olof Johansson91b9a272005-08-09 20:24:39 -07001457 if (list_empty(fa_head)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001458 hlist_del_rcu(&li->hlist);
Olof Johansson91b9a272005-08-09 20:24:39 -07001459 free_leaf_info(li);
Robert Olsson2373ce12005-08-25 13:01:29 -07001460 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001461
1462 if (hlist_empty(&l->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001463 trie_leaf_remove(t, l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001464
1465 if (fa->fa_state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001466 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Olof Johansson91b9a272005-08-09 20:24:39 -07001467
Robert Olsson2373ce12005-08-25 13:01:29 -07001468 fib_release_info(fa->fa_info);
1469 alias_free_mem_rcu(fa);
Olof Johansson91b9a272005-08-09 20:24:39 -07001470 return 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001471}
1472
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001473static int trie_flush_list(struct list_head *head)
Robert Olsson19baf832005-06-21 12:43:18 -07001474{
1475 struct fib_alias *fa, *fa_node;
1476 int found = 0;
1477
1478 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1479 struct fib_info *fi = fa->fa_info;
Robert Olsson19baf832005-06-21 12:43:18 -07001480
Robert Olsson2373ce12005-08-25 13:01:29 -07001481 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1482 list_del_rcu(&fa->fa_list);
1483 fib_release_info(fa->fa_info);
1484 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001485 found++;
1486 }
1487 }
1488 return found;
1489}
1490
Alexander Duyckadaf9812014-12-31 10:55:47 -08001491static int trie_flush_leaf(struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001492{
1493 int found = 0;
1494 struct hlist_head *lih = &l->list;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001495 struct hlist_node *tmp;
Robert Olsson19baf832005-06-21 12:43:18 -07001496 struct leaf_info *li = NULL;
1497
Sasha Levinb67bfe02013-02-27 17:06:00 -08001498 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001499 found += trie_flush_list(&li->falh);
Robert Olsson19baf832005-06-21 12:43:18 -07001500
1501 if (list_empty(&li->falh)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001502 hlist_del_rcu(&li->hlist);
Robert Olsson19baf832005-06-21 12:43:18 -07001503 free_leaf_info(li);
1504 }
1505 }
1506 return found;
1507}
1508
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001509/*
1510 * Scan for the next leaf to the right, starting at node p->child[idx].
1511 * Since we have back pointers, no recursion is necessary.
1512 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001513static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
Robert Olsson19baf832005-06-21 12:43:18 -07001514{
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001515 do {
Alexander Duyck98293e82014-12-31 10:56:18 -08001516		unsigned long idx = c ? get_index(c->key, p) + 1 : 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001517
Alexander Duyck98293e82014-12-31 10:56:18 -08001518 while (idx < tnode_child_length(p)) {
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001519 c = tnode_get_child_rcu(p, idx++);
Robert Olsson2373ce12005-08-25 13:01:29 -07001520 if (!c)
Olof Johansson91b9a272005-08-09 20:24:39 -07001521 continue;
Robert Olsson19baf832005-06-21 12:43:18 -07001522
Eric Dumazetaab515d2013-08-05 11:18:49 -07001523 if (IS_LEAF(c))
Alexander Duyckadaf9812014-12-31 10:55:47 -08001524 return c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001525
1526			/* Rescan: start scanning in the new node */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001527 p = c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001528 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001529 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001530
1531 /* Node empty, walk back up to parent */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001532 c = p;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001533 } while ((p = node_parent_rcu(c)) != NULL);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001534
1535 return NULL; /* Root of trie */
1536}
1537
Alexander Duyckadaf9812014-12-31 10:55:47 -08001538static struct tnode *trie_firstleaf(struct trie *t)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001539{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001540 struct tnode *n = rcu_dereference_rtnl(t->trie);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001541
1542 if (!n)
1543 return NULL;
1544
1545 if (IS_LEAF(n)) /* trie is just a leaf */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001546 return n;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001547
1548 return leaf_walk_rcu(n, NULL);
1549}
1550
Alexander Duyckadaf9812014-12-31 10:55:47 -08001551static struct tnode *trie_nextleaf(struct tnode *l)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001552{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001553 struct tnode *p = node_parent_rcu(l);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001554
1555 if (!p)
1556 return NULL; /* trie with just one leaf */
1557
Alexander Duyckadaf9812014-12-31 10:55:47 -08001558 return leaf_walk_rcu(p, l);
Robert Olsson19baf832005-06-21 12:43:18 -07001559}
1560
Alexander Duyckadaf9812014-12-31 10:55:47 -08001561static struct tnode *trie_leafindex(struct trie *t, int index)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001562{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001563 struct tnode *l = trie_firstleaf(t);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001564
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001565 while (l && index-- > 0)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001566 l = trie_nextleaf(l);
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001567
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001568 return l;
1569}
1570
1571
Robert Olssond562f1f2007-03-26 14:22:22 -07001572/*
1573 * Caller must hold RTNL.
1574 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001575int fib_table_flush(struct fib_table *tb)
Robert Olsson19baf832005-06-21 12:43:18 -07001576{
1577 struct trie *t = (struct trie *) tb->tb_data;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001578 struct tnode *l, *ll = NULL;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001579 int found = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001580
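	/* Removal of an emptied leaf is deferred by one iteration (via ll)
	 * so that trie_nextleaf() is always called on a leaf that is still
	 * linked into the trie.
	 */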
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001581 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001582 found += trie_flush_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001583
1584 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001585 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001586 ll = l;
1587 }
1588
1589 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001590 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001591
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001592 pr_debug("trie_flush found=%d\n", found);
Robert Olsson19baf832005-06-21 12:43:18 -07001593 return found;
1594}
1595
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001596void fib_free_table(struct fib_table *tb)
1597{
Alexander Duyck8274a972014-12-31 10:55:29 -08001598#ifdef CONFIG_IP_FIB_TRIE_STATS
1599 struct trie *t = (struct trie *)tb->tb_data;
1600
1601 free_percpu(t->stats);
1602#endif /* CONFIG_IP_FIB_TRIE_STATS */
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001603 kfree(tb);
1604}
1605
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001606static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1607 struct fib_table *tb,
Robert Olsson19baf832005-06-21 12:43:18 -07001608 struct sk_buff *skb, struct netlink_callback *cb)
1609{
1610 int i, s_i;
1611 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07001612 __be32 xkey = htonl(key);
Robert Olsson19baf832005-06-21 12:43:18 -07001613
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001614 s_i = cb->args[5];
Robert Olsson19baf832005-06-21 12:43:18 -07001615 i = 0;
1616
Robert Olsson2373ce12005-08-25 13:01:29 -07001617 /* rcu_read_lock is hold by caller */
1618
1619 list_for_each_entry_rcu(fa, fah, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001620 if (i < s_i) {
1621 i++;
1622 continue;
1623 }
Robert Olsson19baf832005-06-21 12:43:18 -07001624
Eric W. Biederman15e47302012-09-07 20:12:54 +00001625 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
Robert Olsson19baf832005-06-21 12:43:18 -07001626 cb->nlh->nlmsg_seq,
1627 RTM_NEWROUTE,
1628 tb->tb_id,
1629 fa->fa_type,
Thomas Grafbe403ea2006-08-17 18:15:17 -07001630 xkey,
Robert Olsson19baf832005-06-21 12:43:18 -07001631 plen,
1632 fa->fa_tos,
Stephen Hemminger64347f72008-01-22 21:55:01 -08001633 fa->fa_info, NLM_F_MULTI) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001634 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001635 return -1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001636 }
Robert Olsson19baf832005-06-21 12:43:18 -07001637 i++;
1638 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001639 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001640 return skb->len;
1641}
1642
Alexander Duyckadaf9812014-12-31 10:55:47 -08001643static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001644 struct sk_buff *skb, struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001645{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001646 struct leaf_info *li;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001647 int i, s_i;
Robert Olsson19baf832005-06-21 12:43:18 -07001648
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001649 s_i = cb->args[4];
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001650 i = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001651
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001652	/* rcu_read_lock is held by caller */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001653 hlist_for_each_entry_rcu(li, &l->list, hlist) {
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001654 if (i < s_i) {
1655 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001656 continue;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001657 }
Robert Olsson19baf832005-06-21 12:43:18 -07001658
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001659 if (i > s_i)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001660 cb->args[5] = 0;
Olof Johansson91b9a272005-08-09 20:24:39 -07001661
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001662 if (list_empty(&li->falh))
Robert Olsson19baf832005-06-21 12:43:18 -07001663 continue;
1664
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001665 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001666 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001667 return -1;
1668 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001669 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001670 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001671
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001672 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001673 return skb->len;
1674}
1675
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001676int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1677 struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001678{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001679 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001680 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001681 t_key key = cb->args[2];
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001682 int count = cb->args[3];
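	/* cb->args layout used to resume the dump: [2] key of the last
	 * dumped leaf, [3] number of leaves dumped so far, [4] leaf_info
	 * index within the leaf, [5] alias index within the leaf_info.
	 */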
Robert Olsson19baf832005-06-21 12:43:18 -07001683
Robert Olsson2373ce12005-08-25 13:01:29 -07001684 rcu_read_lock();
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001685 /* Dump starting at last key.
1686	 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1687 */
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001688 if (count == 0)
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001689 l = trie_firstleaf(t);
1690 else {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001691 /* Normally, continue from last key, but if that is missing
1692		 * fall back to using a slow rescan
1693 */
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001694 l = fib_find_node(t, key);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001695 if (!l)
1696 l = trie_leafindex(t, count);
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001697 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001698
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001699 while (l) {
1700 cb->args[2] = l->key;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001701 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001702 cb->args[3] = count;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001703 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001704 return -1;
Robert Olsson19baf832005-06-21 12:43:18 -07001705 }
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001706
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001707 ++count;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001708 l = trie_nextleaf(l);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001709 memset(&cb->args[4], 0,
1710 sizeof(cb->args) - 4*sizeof(cb->args[0]));
Robert Olsson19baf832005-06-21 12:43:18 -07001711 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001712 cb->args[3] = count;
Robert Olsson2373ce12005-08-25 13:01:29 -07001713 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001714
Robert Olsson19baf832005-06-21 12:43:18 -07001715 return skb->len;
Robert Olsson19baf832005-06-21 12:43:18 -07001716}
1717
David S. Miller5348ba82011-02-01 15:30:56 -08001718void __init fib_trie_init(void)
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001719{
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001720 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1721 sizeof(struct fib_alias),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001722 0, SLAB_PANIC, NULL);
1723
1724 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001725 max(sizeof(struct tnode),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001726 sizeof(struct leaf_info)),
1727 0, SLAB_PANIC, NULL);
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001728}
Robert Olsson19baf832005-06-21 12:43:18 -07001729
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001730
David S. Miller5348ba82011-02-01 15:30:56 -08001731struct fib_table *fib_trie_table(u32 id)
Robert Olsson19baf832005-06-21 12:43:18 -07001732{
1733 struct fib_table *tb;
1734 struct trie *t;
1735
Robert Olsson19baf832005-06-21 12:43:18 -07001736 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1737 GFP_KERNEL);
1738 if (tb == NULL)
1739 return NULL;
1740
1741 tb->tb_id = id;
Denis V. Lunev971b8932007-12-08 00:32:23 -08001742 tb->tb_default = -1;
David S. Miller21d8c492011-04-14 14:49:37 -07001743 tb->tb_num_default = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001744
1745 t = (struct trie *) tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001746 RCU_INIT_POINTER(t->trie, NULL);
1747#ifdef CONFIG_IP_FIB_TRIE_STATS
1748 t->stats = alloc_percpu(struct trie_use_stats);
1749 if (!t->stats) {
1750 kfree(tb);
1751 tb = NULL;
1752 }
1753#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001754
Robert Olsson19baf832005-06-21 12:43:18 -07001755 return tb;
1756}
1757
Robert Olsson19baf832005-06-21 12:43:18 -07001758#ifdef CONFIG_PROC_FS
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001759/* Depth first Trie walk iterator */
1760struct fib_trie_iter {
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001761 struct seq_net_private p;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001762 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001763 struct tnode *tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001764 unsigned int index;
1765 unsigned int depth;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001766};
Robert Olsson19baf832005-06-21 12:43:18 -07001767
Alexander Duyckadaf9812014-12-31 10:55:47 -08001768static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
Robert Olsson19baf832005-06-21 12:43:18 -07001769{
Alexander Duyck98293e82014-12-31 10:56:18 -08001770 unsigned long cindex = iter->index;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001771 struct tnode *tn = iter->tnode;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001772 struct tnode *p;
1773
Eric W. Biederman6640e692007-01-24 14:42:04 -08001774 /* A single entry routing table */
1775 if (!tn)
1776 return NULL;
1777
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001778 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1779 iter->tnode, iter->index, iter->depth);
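	/* iter->tnode and iter->index record where to resume in the parent
	 * once a node has been returned; iter->depth is used by the seq_file
	 * pretty-printer for indentation.
	 */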
1780rescan:
Alexander Duyck98293e82014-12-31 10:56:18 -08001781 while (cindex < tnode_child_length(tn)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001782 struct tnode *n = tnode_get_child_rcu(tn, cindex);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001783
1784 if (n) {
1785 if (IS_LEAF(n)) {
1786 iter->tnode = tn;
1787 iter->index = cindex + 1;
1788 } else {
1789 /* push down one level */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001790 iter->tnode = n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001791 iter->index = 0;
1792 ++iter->depth;
1793 }
1794 return n;
1795 }
1796
1797 ++cindex;
1798 }
1799
1800 /* Current node exhausted, pop back up */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001801 p = node_parent_rcu(tn);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001802 if (p) {
Alexander Duycke9b44012014-12-31 10:56:12 -08001803 cindex = get_index(tn->key, p) + 1;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001804 tn = p;
1805 --iter->depth;
1806 goto rescan;
1807 }
1808
1809 /* got root? */
Robert Olsson19baf832005-06-21 12:43:18 -07001810 return NULL;
1811}
1812
Alexander Duyckadaf9812014-12-31 10:55:47 -08001813static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001814 struct trie *t)
Robert Olsson19baf832005-06-21 12:43:18 -07001815{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001816 struct tnode *n;
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001817
Stephen Hemminger132adf52007-03-08 20:44:43 -08001818 if (!t)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001819 return NULL;
1820
1821 n = rcu_dereference(t->trie);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001822 if (!n)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001823 return NULL;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001824
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001825 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001826 iter->tnode = n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001827 iter->index = 0;
1828 iter->depth = 1;
1829 } else {
1830 iter->tnode = NULL;
1831 iter->index = 0;
1832 iter->depth = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001833 }
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001834
1835 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07001836}
1837
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001838static void trie_collect_stats(struct trie *t, struct trie_stat *s)
Robert Olsson19baf832005-06-21 12:43:18 -07001839{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001840 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001841 struct fib_trie_iter iter;
Robert Olsson19baf832005-06-21 12:43:18 -07001842
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001843 memset(s, 0, sizeof(*s));
Robert Olsson19baf832005-06-21 12:43:18 -07001844
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001845 rcu_read_lock();
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001846 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001847 if (IS_LEAF(n)) {
Stephen Hemminger93672292008-01-22 21:54:05 -08001848 struct leaf_info *li;
Stephen Hemminger93672292008-01-22 21:54:05 -08001849
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001850 s->leaves++;
1851 s->totdepth += iter.depth;
1852 if (iter.depth > s->maxdepth)
1853 s->maxdepth = iter.depth;
Stephen Hemminger93672292008-01-22 21:54:05 -08001854
Alexander Duyckadaf9812014-12-31 10:55:47 -08001855 hlist_for_each_entry_rcu(li, &n->list, hlist)
Stephen Hemminger93672292008-01-22 21:54:05 -08001856 ++s->prefixes;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001857 } else {
Alexander Duyck98293e82014-12-31 10:56:18 -08001858 unsigned long i;
Robert Olsson19baf832005-06-21 12:43:18 -07001859
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001860 s->tnodes++;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001861 if (n->bits < MAX_STAT_DEPTH)
1862 s->nodesizes[n->bits]++;
Robert Olsson06ef9212006-03-20 21:35:01 -08001863
Alexander Duyck98293e82014-12-31 10:56:18 -08001864 for (i = 0; i < tnode_child_length(n); i++) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001865 if (!rcu_access_pointer(n->child[i]))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001866 s->nullpointers++;
Alexander Duyck98293e82014-12-31 10:56:18 -08001867 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001868 }
1869 }
1870 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001871}
1872
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001873/*
Robert Olsson19baf832005-06-21 12:43:18 -07001874 * This outputs /proc/net/fib_triestats
Robert Olsson19baf832005-06-21 12:43:18 -07001875 */
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001876static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
Robert Olsson19baf832005-06-21 12:43:18 -07001877{
Eric Dumazeta034ee32010-09-09 23:32:28 +00001878 unsigned int i, max, pointers, bytes, avdepth;
Robert Olsson19baf832005-06-21 12:43:18 -07001879
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001880 if (stat->leaves)
1881 avdepth = stat->totdepth*100 / stat->leaves;
1882 else
1883 avdepth = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001884
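	/* avdepth is scaled by 100 so the average depth can be printed with
	 * two decimal places without floating point arithmetic.
	 */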
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001885 seq_printf(seq, "\tAver depth: %u.%02d\n",
1886 avdepth / 100, avdepth % 100);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001887 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
Robert Olsson19baf832005-06-21 12:43:18 -07001888
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001889 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
Alexander Duyckadaf9812014-12-31 10:55:47 -08001890 bytes = sizeof(struct tnode) * stat->leaves;
Stephen Hemminger93672292008-01-22 21:54:05 -08001891
1892 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
1893 bytes += sizeof(struct leaf_info) * stat->prefixes;
1894
Stephen Hemminger187b5182008-01-12 20:55:55 -08001895 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001896 bytes += sizeof(struct tnode) * stat->tnodes;
Robert Olsson19baf832005-06-21 12:43:18 -07001897
Robert Olsson06ef9212006-03-20 21:35:01 -08001898 max = MAX_STAT_DEPTH;
1899 while (max > 0 && stat->nodesizes[max-1] == 0)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001900 max--;
Robert Olsson19baf832005-06-21 12:43:18 -07001901
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001902 pointers = 0;
Jerry Snitselaarf585a992013-07-22 12:01:58 -07001903 for (i = 1; i < max; i++)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001904 if (stat->nodesizes[i] != 0) {
Stephen Hemminger187b5182008-01-12 20:55:55 -08001905 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001906 pointers += (1<<i) * stat->nodesizes[i];
1907 }
1908 seq_putc(seq, '\n');
Stephen Hemminger187b5182008-01-12 20:55:55 -08001909 seq_printf(seq, "\tPointers: %u\n", pointers);
Robert Olsson19baf832005-06-21 12:43:18 -07001910
Alexander Duyckadaf9812014-12-31 10:55:47 -08001911 bytes += sizeof(struct tnode *) * pointers;
Stephen Hemminger187b5182008-01-12 20:55:55 -08001912 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
1913 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001914}
Robert Olsson19baf832005-06-21 12:43:18 -07001915
1916#ifdef CONFIG_IP_FIB_TRIE_STATS
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001917static void trie_show_usage(struct seq_file *seq,
Alexander Duyck8274a972014-12-31 10:55:29 -08001918 const struct trie_use_stats __percpu *stats)
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001919{
Alexander Duyck8274a972014-12-31 10:55:29 -08001920 struct trie_use_stats s = { 0 };
1921 int cpu;
1922
1923 /* loop through all of the CPUs and gather up the stats */
1924 for_each_possible_cpu(cpu) {
1925 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
1926
1927 s.gets += pcpu->gets;
1928 s.backtrack += pcpu->backtrack;
1929 s.semantic_match_passed += pcpu->semantic_match_passed;
1930 s.semantic_match_miss += pcpu->semantic_match_miss;
1931 s.null_node_hit += pcpu->null_node_hit;
1932 s.resize_node_skipped += pcpu->resize_node_skipped;
1933 }
1934
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001935 seq_printf(seq, "\nCounters:\n---------\n");
Alexander Duyck8274a972014-12-31 10:55:29 -08001936 seq_printf(seq, "gets = %u\n", s.gets);
1937 seq_printf(seq, "backtracks = %u\n", s.backtrack);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001938 seq_printf(seq, "semantic match passed = %u\n",
Alexander Duyck8274a972014-12-31 10:55:29 -08001939 s.semantic_match_passed);
1940 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
1941 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
1942 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
Robert Olsson19baf832005-06-21 12:43:18 -07001943}
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001944#endif /* CONFIG_IP_FIB_TRIE_STATS */
1945
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001946static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001947{
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001948 if (tb->tb_id == RT_TABLE_LOCAL)
1949 seq_puts(seq, "Local:\n");
1950 else if (tb->tb_id == RT_TABLE_MAIN)
1951 seq_puts(seq, "Main:\n");
1952 else
1953 seq_printf(seq, "Id %d:\n", tb->tb_id);
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001954}
Robert Olsson19baf832005-06-21 12:43:18 -07001955
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001956
Robert Olsson19baf832005-06-21 12:43:18 -07001957static int fib_triestat_seq_show(struct seq_file *seq, void *v)
1958{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001959 struct net *net = (struct net *)seq->private;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001960 unsigned int h;
Eric W. Biederman877a9bf2007-12-07 00:47:47 -08001961
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001962 seq_printf(seq,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001963 "Basic info: size of leaf:"
1964 " %Zd bytes, size of tnode: %Zd bytes.\n",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001965 sizeof(struct tnode), sizeof(struct tnode));
Olof Johansson91b9a272005-08-09 20:24:39 -07001966
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001967 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1968 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001969 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001970
Sasha Levinb67bfe02013-02-27 17:06:00 -08001971 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001972 struct trie *t = (struct trie *) tb->tb_data;
1973 struct trie_stat stat;
1974
1975 if (!t)
1976 continue;
1977
1978 fib_table_print(seq, tb);
1979
1980 trie_collect_stats(t, &stat);
1981 trie_show_stats(seq, &stat);
1982#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001983 trie_show_usage(seq, t->stats);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001984#endif
1985 }
1986 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001987
Robert Olsson19baf832005-06-21 12:43:18 -07001988 return 0;
1989}
1990
Robert Olsson19baf832005-06-21 12:43:18 -07001991static int fib_triestat_seq_open(struct inode *inode, struct file *file)
1992{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07001993 return single_open_net(inode, file, fib_triestat_seq_show);
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001994}
1995
Arjan van de Ven9a321442007-02-12 00:55:35 -08001996static const struct file_operations fib_triestat_fops = {
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001997 .owner = THIS_MODULE,
1998 .open = fib_triestat_seq_open,
1999 .read = seq_read,
2000 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07002001 .release = single_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002002};
2003
Alexander Duyckadaf9812014-12-31 10:55:47 -08002004static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
Robert Olsson19baf832005-06-21 12:43:18 -07002005{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002006 struct fib_trie_iter *iter = seq->private;
2007 struct net *net = seq_file_net(seq);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002008 loff_t idx = 0;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002009 unsigned int h;
Robert Olsson19baf832005-06-21 12:43:18 -07002010
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002011 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2012 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002013 struct fib_table *tb;
2014
Sasha Levinb67bfe02013-02-27 17:06:00 -08002015 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002016 struct tnode *n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002017
2018 for (n = fib_trie_get_first(iter,
2019 (struct trie *) tb->tb_data);
2020 n; n = fib_trie_get_next(iter))
2021 if (pos == idx++) {
2022 iter->tb = tb;
2023 return n;
2024 }
2025 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002026 }
Robert Olsson19baf832005-06-21 12:43:18 -07002027
Robert Olsson19baf832005-06-21 12:43:18 -07002028 return NULL;
2029}
2030
2031static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002032 __acquires(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002033{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002034 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002035 return fib_trie_get_idx(seq, *pos);
Robert Olsson19baf832005-06-21 12:43:18 -07002036}
2037
2038static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2039{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002040 struct fib_trie_iter *iter = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002041 struct net *net = seq_file_net(seq);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002042 struct fib_table *tb = iter->tb;
2043 struct hlist_node *tb_node;
2044 unsigned int h;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002045 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002046
Robert Olsson19baf832005-06-21 12:43:18 -07002047 ++*pos;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002048 /* next node in same table */
2049 n = fib_trie_get_next(iter);
2050 if (n)
2051 return n;
Olof Johansson91b9a272005-08-09 20:24:39 -07002052
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002053 /* walk rest of this hash chain */
2054 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
Eric Dumazet0a5c0472011-03-31 01:51:35 -07002055 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002056 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2057 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2058 if (n)
2059 goto found;
2060 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002061
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002062 /* new hash chain */
2063 while (++h < FIB_TABLE_HASHSZ) {
2064 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Sasha Levinb67bfe02013-02-27 17:06:00 -08002065 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002066 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2067 if (n)
2068 goto found;
2069 }
2070 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002071 return NULL;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002072
2073found:
2074 iter->tb = tb;
2075 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002076}
2077
2078static void fib_trie_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002079 __releases(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002080{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002081 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002082}
2083
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002084static void seq_indent(struct seq_file *seq, int n)
2085{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002086 while (n-- > 0)
2087 seq_puts(seq, " ");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002088}
Robert Olsson19baf832005-06-21 12:43:18 -07002089
Eric Dumazet28d36e32008-01-14 23:09:56 -08002090static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002091{
Stephen Hemminger132adf52007-03-08 20:44:43 -08002092 switch (s) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002093 case RT_SCOPE_UNIVERSE: return "universe";
2094 case RT_SCOPE_SITE: return "site";
2095 case RT_SCOPE_LINK: return "link";
2096 case RT_SCOPE_HOST: return "host";
2097 case RT_SCOPE_NOWHERE: return "nowhere";
2098 default:
Eric Dumazet28d36e32008-01-14 23:09:56 -08002099 snprintf(buf, len, "scope=%d", s);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002100 return buf;
2101 }
2102}
2103
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -07002104static const char *const rtn_type_names[__RTN_MAX] = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002105 [RTN_UNSPEC] = "UNSPEC",
2106 [RTN_UNICAST] = "UNICAST",
2107 [RTN_LOCAL] = "LOCAL",
2108 [RTN_BROADCAST] = "BROADCAST",
2109 [RTN_ANYCAST] = "ANYCAST",
2110 [RTN_MULTICAST] = "MULTICAST",
2111 [RTN_BLACKHOLE] = "BLACKHOLE",
2112 [RTN_UNREACHABLE] = "UNREACHABLE",
2113 [RTN_PROHIBIT] = "PROHIBIT",
2114 [RTN_THROW] = "THROW",
2115 [RTN_NAT] = "NAT",
2116 [RTN_XRESOLVE] = "XRESOLVE",
2117};
2118
Eric Dumazeta034ee32010-09-09 23:32:28 +00002119static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002120{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002121 if (t < __RTN_MAX && rtn_type_names[t])
2122 return rtn_type_names[t];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002123 snprintf(buf, len, "type %u", t);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002124 return buf;
2125}
2126
2127/* Pretty print the trie */
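/* One line per node, indented by depth: an internal node prints
 * "+-- <prefix>/<len> <bits> <full_children> <empty_children>", a leaf
 * prints "|-- <address>" followed by one "/<plen> <scope> <type>" line
 * per fib alias (plus "tos=<n>" when set).
 */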
Robert Olsson19baf832005-06-21 12:43:18 -07002128static int fib_trie_seq_show(struct seq_file *seq, void *v)
2129{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002130 const struct fib_trie_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002131 struct tnode *n = v;
Robert Olsson19baf832005-06-21 12:43:18 -07002132
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002133 if (!node_parent_rcu(n))
2134 fib_table_print(seq, iter->tb);
Robert Olsson095b8502007-01-26 19:06:01 -08002135
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002136 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002137 __be32 prf = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002138
Alexander Duycke9b44012014-12-31 10:56:12 -08002139 seq_indent(seq, iter->depth-1);
2140 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2141 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2142 n->full_children, n->empty_children);
Olof Johansson91b9a272005-08-09 20:24:39 -07002143 } else {
Stephen Hemminger13280422008-01-22 21:54:37 -08002144 struct leaf_info *li;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002145 __be32 val = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002146
2147 seq_indent(seq, iter->depth);
Harvey Harrison673d57e2008-10-31 00:53:57 -07002148 seq_printf(seq, " |-- %pI4\n", &val);
Eric Dumazet28d36e32008-01-14 23:09:56 -08002149
Alexander Duyckadaf9812014-12-31 10:55:47 -08002150 hlist_for_each_entry_rcu(li, &n->list, hlist) {
Stephen Hemminger13280422008-01-22 21:54:37 -08002151 struct fib_alias *fa;
Eric Dumazet28d36e32008-01-14 23:09:56 -08002152
Stephen Hemminger13280422008-01-22 21:54:37 -08002153 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2154 char buf1[32], buf2[32];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002155
Stephen Hemminger13280422008-01-22 21:54:37 -08002156 seq_indent(seq, iter->depth+1);
2157 seq_printf(seq, " /%d %s %s", li->plen,
2158 rtn_scope(buf1, sizeof(buf1),
David S. Miller37e826c2011-03-24 18:06:47 -07002159 fa->fa_info->fib_scope),
Stephen Hemminger13280422008-01-22 21:54:37 -08002160 rtn_type(buf2, sizeof(buf2),
2161 fa->fa_type));
2162 if (fa->fa_tos)
Denis V. Lunevb9c4d822008-02-05 02:58:45 -08002163 seq_printf(seq, " tos=%d", fa->fa_tos);
Stephen Hemminger13280422008-01-22 21:54:37 -08002164 seq_putc(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002165 }
2166 }
Robert Olsson19baf832005-06-21 12:43:18 -07002167 }
2168
2169 return 0;
2170}
2171
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002172static const struct seq_operations fib_trie_seq_ops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002173 .start = fib_trie_seq_start,
2174 .next = fib_trie_seq_next,
2175 .stop = fib_trie_seq_stop,
2176 .show = fib_trie_seq_show,
Robert Olsson19baf832005-06-21 12:43:18 -07002177};
2178
2179static int fib_trie_seq_open(struct inode *inode, struct file *file)
2180{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002181 return seq_open_net(inode, file, &fib_trie_seq_ops,
2182 sizeof(struct fib_trie_iter));
Robert Olsson19baf832005-06-21 12:43:18 -07002183}
2184
Arjan van de Ven9a321442007-02-12 00:55:35 -08002185static const struct file_operations fib_trie_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002186 .owner = THIS_MODULE,
2187 .open = fib_trie_seq_open,
2188 .read = seq_read,
2189 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002190 .release = seq_release_net,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002191};
2192
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002193struct fib_route_iter {
2194 struct seq_net_private p;
2195 struct trie *main_trie;
2196 loff_t pos;
2197 t_key key;
2198};
2199
Alexander Duyckadaf9812014-12-31 10:55:47 -08002200static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002201{
Alexander Duyckadaf9812014-12-31 10:55:47 -08002202 struct tnode *l = NULL;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002203 struct trie *t = iter->main_trie;
2204
2205	/* use the cached location of the last found key */
2206 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2207 pos -= iter->pos;
2208 else {
2209 iter->pos = 0;
2210 l = trie_firstleaf(t);
2211 }
2212
2213 while (l && pos-- > 0) {
2214 iter->pos++;
2215 l = trie_nextleaf(l);
2216 }
2217
2218 if (l)
2219		iter->key = l->key;	/* remember it */
2220 else
2221 iter->pos = 0; /* forget it */
2222
2223 return l;
2224}
2225
2226static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2227 __acquires(RCU)
2228{
2229 struct fib_route_iter *iter = seq->private;
2230 struct fib_table *tb;
2231
2232 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002233 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002234 if (!tb)
2235 return NULL;
2236
2237 iter->main_trie = (struct trie *) tb->tb_data;
2238 if (*pos == 0)
2239 return SEQ_START_TOKEN;
2240 else
2241 return fib_route_get_idx(iter, *pos - 1);
2242}
2243
2244static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2245{
2246 struct fib_route_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002247 struct tnode *l = v;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002248
2249 ++*pos;
2250 if (v == SEQ_START_TOKEN) {
2251 iter->pos = 0;
2252 l = trie_firstleaf(iter->main_trie);
2253 } else {
2254 iter->pos++;
2255 l = trie_nextleaf(l);
2256 }
2257
2258 if (l)
2259 iter->key = l->key;
2260 else
2261 iter->pos = 0;
2262 return l;
2263}
2264
2265static void fib_route_seq_stop(struct seq_file *seq, void *v)
2266 __releases(RCU)
2267{
2268 rcu_read_unlock();
2269}
2270
static unsigned int fib_flag_trans(int type, __be32 mask,
				   const struct fib_info *fi)
{
	unsigned int flags = 0;

	if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
		flags = RTF_REJECT;
	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 * This outputs /proc/net/route.
 * The format of the file is not supposed to be changed and needs to stay
 * the same as the fib_hash output to avoid breaking legacy utilities.
 */
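/*
 * Each line is padded to 127 characters (see seq_setwidth()/seq_pad()
 * below) and carries these tab-separated columns:
 *
 *	Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT
 *
 * Destination, Gateway and Mask are raw 32-bit hex words.  A hypothetical
 * line for a directly connected 192.168.1.0/24 on eth0, as printed on a
 * little-endian host, could look like:
 *
 *	eth0	0001A8C0	00000000	0001	0	0	0	00FFFFFF	0	0	0
 */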
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct tnode *l = v;
	struct leaf_info *li;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	hlist_for_each_entry_rcu(li, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned int flags = fib_flag_trans(fa->fa_type,
							    mask, fi);

			if (fa->fa_type == RTN_BROADCAST ||
			    fa->fa_type == RTN_MULTICAST)
				continue;

			seq_setwidth(seq, 127);

			if (fi)
				seq_printf(seq,
					   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u",
					   fi->fib_dev ? fi->fib_dev->name : "*",
					   prefix,
					   fi->fib_nh->nh_gw, flags, 0, 0,
					   fi->fib_priority,
					   mask,
					   (fi->fib_advmss ?
					    fi->fib_advmss + 40 : 0),
					   fi->fib_window,
					   fi->fib_rtt >> 3);
			else
				seq_printf(seq,
					   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u",
					   prefix, 0, flags, 0, 0, 0,
					   mask, 0, 0, 0);

			seq_pad(seq, '\n');
		}
	}

	return 0;
}
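
/*
 * A minimal userspace sketch (illustrative only, not part of this file) of
 * how a legacy tool might consume the table printed above:
 *
 *	FILE *f = fopen("/proc/net/route", "r");
 *	char iface[64];
 *	unsigned int dst, gw, flags;
 *
 *	fscanf(f, "%*[^\n]\n");			// skip the header line
 *	while (fscanf(f, "%63s %x %x %x %*[^\n]\n",
 *		      iface, &dst, &gw, &flags) == 4)
 *		;				// dst and gw are hex, network byte order
 *	fclose(f);
 */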

static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner   = THIS_MODULE,
	.open    = fib_route_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

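/*
 * Register the per-namespace /proc/net entries: "fib_trie", "fib_triestat"
 * and "route".  On failure, anything created so far is removed again in
 * reverse order and -ENOMEM is returned.
 */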
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
		goto out1;

	if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
			 &fib_triestat_fops))
		goto out2;

	if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
		goto out3;

	return 0;

out3:
	remove_proc_entry("fib_triestat", net->proc_net);
out2:
	remove_proc_entry("fib_trie", net->proc_net);
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	remove_proc_entry("fib_trie", net->proc_net);
	remove_proc_entry("fib_triestat", net->proc_net);
	remove_proc_entry("route", net->proc_net);
}

#endif /* CONFIG_PROC_FS */