/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)

#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)

struct tnode {
	t_key key;
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	struct tnode __rcu *parent;
	struct rcu_head rcu;
	union {
		/* The fields in this struct are valid if bits > 0 (TNODE) */
		struct {
			unsigned int full_children;  /* KEYLENGTH bits needed */
			unsigned int empty_children; /* KEYLENGTH bits needed */
			struct tnode __rcu *child[0];
		};
		/* This list pointer is valid if bits == 0 (LEAF) */
		struct hlist_head list;
	};
};

struct leaf_info {
	struct hlist_node hlist;
	int plen;
	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
	struct list_head falh;
	struct rcu_head rcu;
};

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct tnode __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static void tnode_put_child_reorg(struct tnode *tn, int i, struct tnode *n,
				  int wasfull);
static struct tnode *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct callback_head *tnode_free_head;
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/* caller must hold RTNL */
#define node_parent(n) rtnl_dereference((n)->parent)

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct tnode *n, struct tnode *tp)
{
	if (n)
		rcu_assign_pointer(n->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)

/* This provides us with the number of children in this node, in the case of a
 * leaf this will return 0 meaning none of the children are accessible.
 */
static inline int tnode_child_length(const struct tnode *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}

/*
 * caller must hold RTNL
 */
static inline struct tnode *tnode_get_child(const struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rtnl_dereference(tn->child[i]);
}

/*
 * caller must hold RCU read lock or RTNL
 */
static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rcu_dereference_rtnl(tn->child[i]);
}

/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * if n is an internal node - a 'tnode' here, the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
 * at this point.
 */

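/* Worked example, using the layout above: get_index(key, tp) is
 * (key ^ tp->key) >> tp->pos, so with tp->pos = 22 and tp->bits = 3 it
 * yields the three "N" bits (bits 24..22 of the key) as the index of n in
 * tp's child array, provided the bits above (tp->pos + tp->bits) match
 * tp->key. If they do not, the result is at least (1 << tp->bits), which
 * is how the lookup code detects a mismatch in the skipped bits.
 */
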
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

#define TNODE_KMALLOC_MAX \
	ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))

static void __node_free_rcu(struct rcu_head *head)
{
	struct tnode *n = container_of(head, struct tnode, rcu);

	if (IS_LEAF(n))
		kmem_cache_free(trie_leaf_kmem, n);
	else if (n->bits <= TNODE_KMALLOC_MAX)
		kfree(n);
	else
		vfree(n);
}

#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)

static inline void free_leaf_info(struct leaf_info *leaf)
{
	kfree_rcu(leaf, rcu);
}

static struct tnode *tnode_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->rcu.next = tnode_free_head;
	tnode_free_head = &tn->rcu;
}

static void tnode_free_flush(void)
{
	struct callback_head *head;

	while ((head = tnode_free_head)) {
		struct tnode *tn = container_of(head, struct tnode, rcu);

		tnode_free_head = head->next;
		tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);

		node_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}

static struct tnode *leaf_new(t_key key)
{
	struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (l) {
		l->parent = NULL;
		/* set key and pos to reflect full key value
		 * any trailing zeros in the key should be ignored
		 * as the nodes are searched
		 */
		l->key = key;
		l->pos = 0;
		/* set bits to 0 indicating we are not a tnode */
		l->bits = 0;

		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		li->mask_plen = ntohl(inet_make_mask(plen));
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}

static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	size_t sz = offsetof(struct tnode, child[1 << bits]);
	struct tnode *tn = tnode_alloc(sz);
	unsigned int shift = pos + bits;

	/* verify that bits and pos have their msb bits clear and the values are valid */
	BUG_ON(!bits || (shift > KEYLENGTH));

	if (tn) {
		tn->parent = NULL;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
		 sizeof(struct tnode *) << bits);
	return tn;
}

/* Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
{
	return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}

static inline void put_child(struct tnode *tn, int i,
			     struct tnode *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}

/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */

static void tnode_put_child_reorg(struct tnode *tn, int i, struct tnode *n,
				  int wasfull)
{
	struct tnode *chi = rtnl_dereference(tn->child[i]);
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}

static void put_child_root(struct tnode *tp, struct trie *t,
			   t_key key, struct tnode *n)
{
	if (tp)
		put_child(tp, get_index(key, tp), n);
	else
		rcu_assign_pointer(t->trie, n);
}

#define MAX_WORK 10
static struct tnode *resize(struct trie *t, struct tnode *tn)
{
	struct tnode *old_tn, *n = NULL;
	int inflate_threshold_use;
	int halve_threshold_use;
	int max_work;

	if (!tn)
		return NULL;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children > (tnode_child_length(tn) - 1))
		goto no_children;

	/* One child */
	if (tn->empty_children == (tnode_child_length(tn) - 1))
		goto one_child;
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */

	/*
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it with
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *      new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *
	 * ...and so on, tho it would mess up the while () loop.
	 *
	 * anyway,
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 *      inflate_threshold
	 *
	 * avoid a division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 *      inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >=
	 *      inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *    tn->empty_children) >= inflate_threshold *
	 *    tnode_child_length(tn)
	 *
	 */

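	/* Worked example of the final inequality: a tnode with bits = 4 has
	 * tnode_child_length(tn) = 16; with 4 empty and 2 full children the
	 * left-hand side is 50 * (2 + 16 - 4) = 700, while the right-hand
	 * side is 50 * 16 = 800 for the default inflate_threshold of 50, so
	 * such a node would not be doubled.
	 */
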
	/* Keep root node larger */

	if (!node_parent(tn)) {
		inflate_threshold_use = inflate_threshold_root;
		halve_threshold_use = halve_threshold_root;
	} else {
		inflate_threshold_use = inflate_threshold;
		halve_threshold_use = halve_threshold;
	}

	max_work = MAX_WORK;
	while ((tn->full_children > 0 && max_work-- &&
		50 * (tn->full_children + tnode_child_length(tn)
		      - tn->empty_children)
		>= inflate_threshold_use * tnode_child_length(tn))) {

		old_tn = tn;
		tn = inflate(t, tn);

		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}
	}

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return tn;

	/*
	 * Halve as long as the number of empty children in this
	 * node is above threshold.
	 */

	max_work = MAX_WORK;
	while (tn->bits > 1 && max_work-- &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       halve_threshold_use * tnode_child_length(tn)) {

		old_tn = tn;
		tn = halve(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}
	}


	/* Only one child remains */
	if (tn->empty_children == (tnode_child_length(tn) - 1)) {
		unsigned long i;
one_child:
		for (i = tnode_child_length(tn); !n && i;)
			n = tnode_get_child(tn, --i);
no_children:
		/* compress one level */
		node_set_parent(n, NULL);
		tnode_free_safe(tn);
		return n;
	}
	return tn;
}


static void tnode_clean_free(struct tnode *tn)
{
	struct tnode *tofree;
	int i;

	for (i = 0; i < tnode_child_length(tn); i++) {
		tofree = rtnl_dereference(tn->child[i]);
		if (tofree)
			node_free(tofree);
	}
	node_free(tn);
}

static struct tnode *inflate(struct trie *t, struct tnode *oldtnode)
{
	int olen = tnode_child_length(oldtnode);
	struct tnode *tn;
	t_key m;
	int i;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */
	for (i = 0, m = 1u << tn->pos; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);

		if (tnode_full(oldtnode, inode) && (inode->bits > 1)) {
			struct tnode *left, *right;

			left = tnode_new(inode->key & ~m, inode->pos,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key | m, inode->pos,
					  inode->bits - 1);

			if (!right) {
				node_free(left);
				goto nomem;
			}

			put_child(tn, 2*i, left);
			put_child(tn, 2*i+1, right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		int size, j;

		/* An empty child */
		if (inode == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
			put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		left = tnode_get_child(tn, 2*i);
		put_child(tn, 2*i, NULL);

		BUG_ON(!left);

		right = tnode_get_child(tn, 2*i+1);
		put_child(tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(left, j, rtnl_dereference(inode->child[j]));
			put_child(right, j, rtnl_dereference(inode->child[j + size]));
		}
		put_child(tn, 2*i, resize(t, left));
		put_child(tn, 2*i+1, resize(t, right));

		tnode_free_safe(inode);
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

static struct tnode *halve(struct trie *t, struct tnode *oldtnode)
{
	int olen = tnode_child_length(oldtnode);
	struct tnode *tn, *left, *right;
	int i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, oldtnode->pos, 1);

			if (!newn)
				goto nomem;

			put_child(tn, i/2, newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = tnode_get_child(tn, i/2);
		put_child(tn, i/2, NULL);
		put_child(newBinNode, 0, left);
		put_child(newBinNode, 1, right);
		put_child(tn, i/2, resize(t, newBinNode));
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

/* readside must use rcu_read_lock; currently the dump routines
 * via get_fa_head and dump are the readside users */

static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct tnode *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_behind_rcu(&new->hlist, &last->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}

/* rcu_read_lock needs to be held by the caller on the read side */
static struct tnode *fib_find_node(struct trie *t, u32 key)
{
	struct tnode *n = rcu_dereference_rtnl(t->trie);

	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if !(index >> bits)
		 *   we know the value is cindex
		 * else
		 *   we have a mismatch in skip bits and failed
		 */
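		/* Illustration with hypothetical values: for a tnode with
		 * key 0xC0A80000, pos 16 and bits 2, the key 0xC0A90000
		 * gives index (0xC0A90000 ^ 0xC0A80000) >> 16 == 1, a valid
		 * child index, while 0xC1A80000 gives 0x100, and
		 * (0x100 >> 2) != 0 reveals a mismatch in the skipped bits.
		 */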
		if (index >> n->bits)
			return NULL;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			break;

		n = rcu_dereference_rtnl(n->child[index]);
	}

	return n;
}

static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp;

	key = tn->key;

	while (tn != NULL && (tp = node_parent(tn)) != NULL) {
		cindex = get_index(key, tp);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = resize(t, tn);

		tnode_put_child_reorg(tp, cindex, tn, wasfull);

		tp = node_parent(tn);
		if (!tp)
			rcu_assign_pointer(t->trie, tn);

		tnode_free_flush();
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = resize(t, tn);

	rcu_assign_pointer(t->trie, tn);
	tnode_free_flush();
}

/* only used from updater-side */

static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	struct list_head *fa_head = NULL;
	struct tnode *l, *n, *tp = NULL;
	struct leaf_info *li;

	li = leaf_info_new(plen);
	if (!li)
		return NULL;
	fa_head = &li->falh;

	n = rtnl_dereference(t->trie);

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 *
	 * If we hit a node with a key that doesn't match then we should stop
	 * and create a new tnode to replace that node and insert ourselves
	 * and the other node into the new tnode.
	 */
	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if !(index >> bits)
		 *   we know the value is child index
		 * else
		 *   we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			break;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n)) {
			/* Case 1: n is a leaf, and prefixes match */
			insert_leaf_info(&n->list, li);
			return fa_head;
		}

		tp = n;
		n = rcu_dereference_rtnl(n->child[index]);
	}

	l = leaf_new(key);
	if (!l) {
		free_leaf_info(li);
		return NULL;
	}

	insert_leaf_info(&l->list, li);

	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 *  Add a new tnode here
	 *  the first tnode needs some special handling
	 *  this leaves us in position for handling as case 3
981 */
982 if (n) {
983 struct tnode *tn;
Alexander Duyck836a0122014-12-31 10:56:06 -0800984
Alexander Duycke9b44012014-12-31 10:56:12 -0800985 tn = tnode_new(key, __fls(key ^ n->key), 1);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -0700986 if (!tn) {
Robert Olssonf835e472005-06-28 15:00:39 -0700987 free_leaf_info(li);
Alexander Duyck37fd30f2014-12-31 10:55:41 -0800988 node_free(l);
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -0800989 return NULL;
Olof Johansson91b9a272005-08-09 20:24:39 -0700990 }
991
Alexander Duyck836a0122014-12-31 10:56:06 -0800992 /* initialize routes out of node */
993 NODE_INIT_PARENT(tn, tp);
994 put_child(tn, get_index(key, tn) ^ 1, n);
Robert Olsson19baf832005-06-21 12:43:18 -0700995
Alexander Duyck836a0122014-12-31 10:56:06 -0800996 /* start adding routes into the node */
997 put_child_root(tp, t, key, tn);
998 node_set_parent(n, tn);
Robert Olsson19baf832005-06-21 12:43:18 -0700999
Alexander Duyck836a0122014-12-31 10:56:06 -08001000 /* parent now has a NULL spot where the leaf can go */
Alexander Duycke962f302014-12-10 21:49:22 -08001001 tp = tn;
Robert Olsson19baf832005-06-21 12:43:18 -07001002 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001003
Alexander Duyck836a0122014-12-31 10:56:06 -08001004 /* Case 3: n is NULL, and will just insert a new leaf */
1005 if (tp) {
1006 NODE_INIT_PARENT(l, tp);
1007 put_child(tp, get_index(key, tp), l);
1008 trie_rebalance(t, tp);
1009 } else {
1010 rcu_assign_pointer(t->trie, l);
1011 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001012
Robert Olsson19baf832005-06-21 12:43:18 -07001013 return fa_head;
1014}
1015
Robert Olssond562f1f2007-03-26 14:22:22 -07001016/*
1017 * Caller must hold RTNL.
1018 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001019int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001020{
1021 struct trie *t = (struct trie *) tb->tb_data;
1022 struct fib_alias *fa, *new_fa;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001023 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001024 struct fib_info *fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001025 int plen = cfg->fc_dst_len;
1026 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001027 u32 key, mask;
1028 int err;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001029 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001030
1031 if (plen > 32)
1032 return -EINVAL;
1033
Thomas Graf4e902c52006-08-17 18:14:52 -07001034 key = ntohl(cfg->fc_dst);
Robert Olsson19baf832005-06-21 12:43:18 -07001035
Patrick McHardy2dfe55b2006-08-10 23:08:33 -07001036 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001037
Olof Johansson91b9a272005-08-09 20:24:39 -07001038 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001039
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001040 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001041 return -EINVAL;
1042
1043 key = key & mask;
1044
Thomas Graf4e902c52006-08-17 18:14:52 -07001045 fi = fib_create_info(cfg);
1046 if (IS_ERR(fi)) {
1047 err = PTR_ERR(fi);
Robert Olsson19baf832005-06-21 12:43:18 -07001048 goto err;
Thomas Graf4e902c52006-08-17 18:14:52 -07001049 }
Robert Olsson19baf832005-06-21 12:43:18 -07001050
1051 l = fib_find_node(t, key);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001052 fa = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001053
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001054 if (l) {
Robert Olsson19baf832005-06-21 12:43:18 -07001055 fa_head = get_fa_head(l, plen);
1056 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1057 }
1058
1059 /* Now fa, if non-NULL, points to the first fib alias
1060 * with the same keys [prefix,tos,priority], if such key already
1061 * exists or to the node before which we will insert new one.
1062 *
1063 * If fa is NULL, we will need to allocate a new one and
1064 * insert to the head of f.
1065 *
1066 * If f is NULL, no fib node matched the destination key
1067 * and we need to allocate a new one of those as well.
1068 */
1069
Julian Anastasov936f6f82008-01-28 21:18:06 -08001070 if (fa && fa->fa_tos == tos &&
1071 fa->fa_info->fib_priority == fi->fib_priority) {
1072 struct fib_alias *fa_first, *fa_match;
Robert Olsson19baf832005-06-21 12:43:18 -07001073
1074 err = -EEXIST;
Thomas Graf4e902c52006-08-17 18:14:52 -07001075 if (cfg->fc_nlflags & NLM_F_EXCL)
Robert Olsson19baf832005-06-21 12:43:18 -07001076 goto out;
1077
Julian Anastasov936f6f82008-01-28 21:18:06 -08001078 /* We have 2 goals:
1079 * 1. Find exact match for type, scope, fib_info to avoid
1080 * duplicate routes
1081 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1082 */
1083 fa_match = NULL;
1084 fa_first = fa;
1085 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1086 list_for_each_entry_continue(fa, fa_head, fa_list) {
1087 if (fa->fa_tos != tos)
1088 break;
1089 if (fa->fa_info->fib_priority != fi->fib_priority)
1090 break;
1091 if (fa->fa_type == cfg->fc_type &&
Julian Anastasov936f6f82008-01-28 21:18:06 -08001092 fa->fa_info == fi) {
1093 fa_match = fa;
1094 break;
1095 }
1096 }
1097
Thomas Graf4e902c52006-08-17 18:14:52 -07001098 if (cfg->fc_nlflags & NLM_F_REPLACE) {
Robert Olsson19baf832005-06-21 12:43:18 -07001099 struct fib_info *fi_drop;
1100 u8 state;
1101
Julian Anastasov936f6f82008-01-28 21:18:06 -08001102 fa = fa_first;
1103 if (fa_match) {
1104 if (fa == fa_match)
1105 err = 0;
Joonwoo Park67250332008-01-18 03:45:18 -08001106 goto out;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001107 }
Robert Olsson2373ce12005-08-25 13:01:29 -07001108 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001109 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -07001110 if (new_fa == NULL)
1111 goto out;
Robert Olsson19baf832005-06-21 12:43:18 -07001112
1113 fi_drop = fa->fa_info;
Robert Olsson2373ce12005-08-25 13:01:29 -07001114 new_fa->fa_tos = fa->fa_tos;
1115 new_fa->fa_info = fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001116 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001117 state = fa->fa_state;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001118 new_fa->fa_state = state & ~FA_S_ACCESSED;
Robert Olsson19baf832005-06-21 12:43:18 -07001119
Robert Olsson2373ce12005-08-25 13:01:29 -07001120 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1121 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001122
1123 fib_release_info(fi_drop);
1124 if (state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001125 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Milan Kocianb8f55832007-05-23 14:55:06 -07001126 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1127 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
Robert Olsson19baf832005-06-21 12:43:18 -07001128
Olof Johansson91b9a272005-08-09 20:24:39 -07001129 goto succeeded;
Robert Olsson19baf832005-06-21 12:43:18 -07001130 }
1131 /* Error if we find a perfect match which
1132 * uses the same scope, type, and nexthop
1133 * information.
1134 */
Julian Anastasov936f6f82008-01-28 21:18:06 -08001135 if (fa_match)
1136 goto out;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001137
Thomas Graf4e902c52006-08-17 18:14:52 -07001138 if (!(cfg->fc_nlflags & NLM_F_APPEND))
Julian Anastasov936f6f82008-01-28 21:18:06 -08001139 fa = fa_first;
Robert Olsson19baf832005-06-21 12:43:18 -07001140 }
1141 err = -ENOENT;
Thomas Graf4e902c52006-08-17 18:14:52 -07001142 if (!(cfg->fc_nlflags & NLM_F_CREATE))
Robert Olsson19baf832005-06-21 12:43:18 -07001143 goto out;
1144
1145 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001146 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson19baf832005-06-21 12:43:18 -07001147 if (new_fa == NULL)
1148 goto out;
1149
1150 new_fa->fa_info = fi;
1151 new_fa->fa_tos = tos;
Thomas Graf4e902c52006-08-17 18:14:52 -07001152 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001153 new_fa->fa_state = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001154 /*
1155	 * Insert the new entry into the list.
1156 */
1157
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001158 if (!fa_head) {
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001159 fa_head = fib_insert_node(t, key, plen);
1160 if (unlikely(!fa_head)) {
1161 err = -ENOMEM;
Robert Olssonf835e472005-06-28 15:00:39 -07001162 goto out_free_new_fa;
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001163 }
Robert Olssonf835e472005-06-28 15:00:39 -07001164 }
Robert Olsson19baf832005-06-21 12:43:18 -07001165
David S. Miller21d8c492011-04-14 14:49:37 -07001166 if (!plen)
1167 tb->tb_num_default++;
1168
Robert Olsson2373ce12005-08-25 13:01:29 -07001169 list_add_tail_rcu(&new_fa->fa_list,
1170 (fa ? &fa->fa_list : fa_head));
Robert Olsson19baf832005-06-21 12:43:18 -07001171
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001172 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Thomas Graf4e902c52006-08-17 18:14:52 -07001173 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001174 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001175succeeded:
1176 return 0;
Robert Olssonf835e472005-06-28 15:00:39 -07001177
1178out_free_new_fa:
1179 kmem_cache_free(fn_alias_kmem, new_fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001180out:
1181 fib_release_info(fi);
Olof Johansson91b9a272005-08-09 20:24:39 -07001182err:
Robert Olsson19baf832005-06-21 12:43:18 -07001183 return err;
1184}
1185
Robert Olsson772cb712005-09-19 15:31:18 -07001186/* should be called with rcu_read_lock held */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001187static int check_leaf(struct fib_table *tb, struct trie *t, struct tnode *l,
David S. Miller22bd5b92011-03-11 19:54:08 -05001188 t_key key, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001189 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001190{
Robert Olsson19baf832005-06-21 12:43:18 -07001191 struct leaf_info *li;
1192 struct hlist_head *hhead = &l->list;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001193
Sasha Levinb67bfe02013-02-27 17:06:00 -08001194 hlist_for_each_entry_rcu(li, hhead, hlist) {
David S. Miller3be06862011-03-07 15:01:10 -08001195 struct fib_alias *fa;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001196
Eric Dumazet5c745012011-07-18 03:16:33 +00001197 if (l->key != (key & li->mask_plen))
Robert Olsson19baf832005-06-21 12:43:18 -07001198 continue;
1199
David S. Miller3be06862011-03-07 15:01:10 -08001200 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1201 struct fib_info *fi = fa->fa_info;
1202 int nhsel, err;
1203
David S. Miller22bd5b92011-03-11 19:54:08 -05001204 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
David S. Miller3be06862011-03-07 15:01:10 -08001205 continue;
David S. Millerdccd9ecc2012-05-10 22:16:32 -04001206 if (fi->fib_dead)
1207 continue;
David S. Miller37e826c2011-03-24 18:06:47 -07001208 if (fa->fa_info->fib_scope < flp->flowi4_scope)
David S. Miller3be06862011-03-07 15:01:10 -08001209 continue;
1210 fib_alias_accessed(fa);
1211 err = fib_props[fa->fa_type].error;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001212 if (unlikely(err < 0)) {
David S. Miller3be06862011-03-07 15:01:10 -08001213#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001214 this_cpu_inc(t->stats->semantic_match_passed);
David S. Miller3be06862011-03-07 15:01:10 -08001215#endif
Julian Anastasov1fbc7842011-03-25 20:33:23 -07001216 return err;
David S. Miller3be06862011-03-07 15:01:10 -08001217 }
1218 if (fi->fib_flags & RTNH_F_DEAD)
1219 continue;
1220 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1221 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1222
1223 if (nh->nh_flags & RTNH_F_DEAD)
1224 continue;
David S. Miller22bd5b92011-03-11 19:54:08 -05001225 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
David S. Miller3be06862011-03-07 15:01:10 -08001226 continue;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001227
Robert Olsson19baf832005-06-21 12:43:18 -07001228#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001229 this_cpu_inc(t->stats->semantic_match_passed);
Robert Olsson19baf832005-06-21 12:43:18 -07001230#endif
Eric Dumazet5c745012011-07-18 03:16:33 +00001231 res->prefixlen = li->plen;
David S. Miller3be06862011-03-07 15:01:10 -08001232 res->nh_sel = nhsel;
1233 res->type = fa->fa_type;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001234 res->scope = fi->fib_scope;
David S. Miller3be06862011-03-07 15:01:10 -08001235 res->fi = fi;
1236 res->table = tb;
1237 res->fa_head = &li->falh;
1238 if (!(fib_flags & FIB_LOOKUP_NOREF))
Eric Dumazet5c745012011-07-18 03:16:33 +00001239 atomic_inc(&fi->fib_clntref);
David S. Miller3be06862011-03-07 15:01:10 -08001240 return 0;
1241 }
1242 }
1243
1244#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001245 this_cpu_inc(t->stats->semantic_match_miss);
David S. Miller3be06862011-03-07 15:01:10 -08001246#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001247 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001248
Ben Hutchings2e655572008-07-10 16:52:52 -07001249 return 1;
Robert Olsson19baf832005-06-21 12:43:18 -07001250}
1251
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001252static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1253{
1254 t_key prefix = n->key;
1255
1256 return (key ^ prefix) & (prefix | -prefix);
1257}
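
/* Worked example (illustrative values only): for a node key of 0xa0 the
 * mask (prefix | -prefix) is 0xffffffe0, i.e. every bit from the key's
 * least-significant set bit upward.  A lookup key of 0xa7 differs only
 * below that bit, so prefix_mismatch() returns 0; a key of 0xe3 differs
 * at bit 6, so a non-zero value is returned and the caller backtracks.
 */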
1258
David S. Miller22bd5b92011-03-11 19:54:08 -05001259int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001260 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001261{
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001262 struct trie *t = (struct trie *)tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001263#ifdef CONFIG_IP_FIB_TRIE_STATS
1264 struct trie_use_stats __percpu *stats = t->stats;
1265#endif
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001266 const t_key key = ntohl(flp->daddr);
1267 struct tnode *n, *pn;
1268 t_key cindex;
1269 int ret = 1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001270
Robert Olsson2373ce12005-08-25 13:01:29 -07001271 rcu_read_lock();
Robert Olsson19baf832005-06-21 12:43:18 -07001272
Robert Olsson2373ce12005-08-25 13:01:29 -07001273 n = rcu_dereference(t->trie);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001274 if (!n)
Robert Olsson19baf832005-06-21 12:43:18 -07001275 goto failed;
1276
1277#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001278 this_cpu_inc(stats->gets);
Robert Olsson19baf832005-06-21 12:43:18 -07001279#endif
1280
Alexander Duyckadaf9812014-12-31 10:55:47 -08001281 pn = n;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001282 cindex = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001283
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001284 /* Step 1: Travel to the longest prefix match in the trie */
1285 for (;;) {
1286 unsigned long index = get_index(key, n);
Robert Olsson19baf832005-06-21 12:43:18 -07001287
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001288		/* This bit of code is a bit tricky, but it combines multiple
1289		 * checks into a single check.  The node's key holds the prefix
1290		 * bits followed by zeros for the node's "bits" span and the
1291		 * skipped bits below it.  The index is the xor of the lookup
1292		 * key with that value, shifted down to the node's position.
1293		 * From this we can derive two pieces of data:
1294		 *   if !(index >> bits)
1295		 *     the value is the child index
1296		 *   else
1297		 *     we have a mismatch in the skipped bits and have failed
1298		 */
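		/* Worked example (illustrative numbers): with n->pos == 24
		 * and n->bits == 2, index is (key ^ n->key) >> 24.  Values
		 * 0..3 select one of the four children; anything larger
		 * means a bit above the node's two-bit span differed, i.e.
		 * the skipped prefix bits do not match.
		 */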
1298 if (index >> n->bits)
1299 break;
Robert Olsson19baf832005-06-21 12:43:18 -07001300
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001301 /* we have found a leaf. Prefixes have already been compared */
1302 if (IS_LEAF(n))
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001303 goto found;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001304
1305 /* only record pn and cindex if we are going to be chopping
1306 * bits later. Otherwise we are just wasting cycles.
1307 */
1308 if (index) {
1309 pn = n;
1310 cindex = index;
Olof Johansson91b9a272005-08-09 20:24:39 -07001311 }
1312
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001313 n = rcu_dereference(n->child[index]);
1314 if (unlikely(!n))
Robert Olsson19baf832005-06-21 12:43:18 -07001315 goto backtrace;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001316 }
1317
1318 /* Step 2: Sort out leaves and begin backtracing for longest prefix */
1319 for (;;) {
1320 /* record the pointer where our next node pointer is stored */
1321 struct tnode __rcu **cptr = n->child;
1322
1323 /* This test verifies that none of the bits that differ
1324 * between the key and the prefix exist in the region of
1325 * the lsb and higher in the prefix.
1326 */
1327 if (unlikely(prefix_mismatch(key, n)))
1328 goto backtrace;
1329
1330 /* exit out and process leaf */
1331 if (unlikely(IS_LEAF(n)))
1332 break;
1333
1334 /* Don't bother recording parent info. Since we are in
1335		 * prefix match mode, we will have to come back to wherever
1336		 * we started this traversal anyway.
1337 */
1338
1339 while ((n = rcu_dereference(*cptr)) == NULL) {
1340backtrace:
1341#ifdef CONFIG_IP_FIB_TRIE_STATS
1342 if (!n)
1343 this_cpu_inc(stats->null_node_hit);
1344#endif
1345 /* If we are at cindex 0 there are no more bits for
1346 * us to strip at this level so we must ascend back
1347 * up one level to see if there are any more bits to
1348 * be stripped there.
1349 */
1350 while (!cindex) {
1351 t_key pkey = pn->key;
1352
1353 pn = node_parent_rcu(pn);
1354 if (unlikely(!pn))
1355 goto failed;
1356#ifdef CONFIG_IP_FIB_TRIE_STATS
1357 this_cpu_inc(stats->backtrack);
1358#endif
1359 /* Get Child's index */
1360 cindex = get_index(pkey, pn);
1361 }
1362
1363 /* strip the least significant bit from the cindex */
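			/* e.g. (illustrative) cindex 0b0110 -> 0b0100 -> 0b0000;
			 * once it reaches zero, the while (!cindex) loop above
			 * ascends to the parent node.
			 */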
1364 cindex &= cindex - 1;
1365
1366 /* grab pointer for next child node */
1367 cptr = &pn->child[cindex];
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001368 }
Robert Olsson19baf832005-06-21 12:43:18 -07001369 }
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001370
Robert Olsson19baf832005-06-21 12:43:18 -07001371found:
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001372 /* Step 3: Process the leaf, if that fails fall back to backtracing */
1373 ret = check_leaf(tb, t, n, key, flp, res, fib_flags);
1374 if (unlikely(ret > 0))
1375 goto backtrace;
1376failed:
Robert Olsson2373ce12005-08-25 13:01:29 -07001377 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001378 return ret;
1379}
Florian Westphal6fc01432011-08-25 13:46:12 +02001380EXPORT_SYMBOL_GPL(fib_table_lookup);
Robert Olsson19baf832005-06-21 12:43:18 -07001381
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001382/*
1383 * Remove the leaf and rebalance the trie from its parent.
1384 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001385static void trie_leaf_remove(struct trie *t, struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001386{
Alexander Duyck64c9b6f2014-12-31 10:55:35 -08001387 struct tnode *tp = node_parent(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001388
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001389 pr_debug("entering trie_leaf_remove(%p)\n", l);
Robert Olsson19baf832005-06-21 12:43:18 -07001390
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001391 if (tp) {
Alexander Duyck836a0122014-12-31 10:56:06 -08001392 put_child(tp, get_index(l->key, tp), NULL);
Jarek Poplawski7b855762009-06-18 00:28:51 -07001393 trie_rebalance(t, tp);
Alexander Duyck836a0122014-12-31 10:56:06 -08001394 } else {
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001395 RCU_INIT_POINTER(t->trie, NULL);
Alexander Duyck836a0122014-12-31 10:56:06 -08001396 }
Robert Olsson19baf832005-06-21 12:43:18 -07001397
Alexander Duyck37fd30f2014-12-31 10:55:41 -08001398 node_free(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001399}
1400
Robert Olssond562f1f2007-03-26 14:22:22 -07001401/*
1402 * Caller must hold RTNL.
1403 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001404int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001405{
1406 struct trie *t = (struct trie *) tb->tb_data;
1407 u32 key, mask;
Thomas Graf4e902c52006-08-17 18:14:52 -07001408 int plen = cfg->fc_dst_len;
1409 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001410 struct fib_alias *fa, *fa_to_delete;
1411 struct list_head *fa_head;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001412 struct tnode *l;
Olof Johansson91b9a272005-08-09 20:24:39 -07001413 struct leaf_info *li;
1414
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001415 if (plen > 32)
Robert Olsson19baf832005-06-21 12:43:18 -07001416 return -EINVAL;
1417
Thomas Graf4e902c52006-08-17 18:14:52 -07001418 key = ntohl(cfg->fc_dst);
Olof Johansson91b9a272005-08-09 20:24:39 -07001419 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001420
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001421 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001422 return -EINVAL;
1423
1424 key = key & mask;
1425 l = fib_find_node(t, key);
1426
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001427 if (!l)
Robert Olsson19baf832005-06-21 12:43:18 -07001428 return -ESRCH;
1429
Igor Maravicad5b3102012-08-13 10:26:08 +02001430 li = find_leaf_info(l, plen);
1431
1432 if (!li)
1433 return -ESRCH;
1434
1435 fa_head = &li->falh;
Robert Olsson19baf832005-06-21 12:43:18 -07001436 fa = fib_find_alias(fa_head, tos, 0);
1437
1438 if (!fa)
1439 return -ESRCH;
1440
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001441 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
Robert Olsson19baf832005-06-21 12:43:18 -07001442
1443 fa_to_delete = NULL;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001444 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1445 list_for_each_entry_continue(fa, fa_head, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001446 struct fib_info *fi = fa->fa_info;
1447
1448 if (fa->fa_tos != tos)
1449 break;
1450
Thomas Graf4e902c52006-08-17 18:14:52 -07001451 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1452 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
David S. Miller37e826c2011-03-24 18:06:47 -07001453 fa->fa_info->fib_scope == cfg->fc_scope) &&
Julian Anastasov74cb3c12011-03-19 12:13:46 +00001454 (!cfg->fc_prefsrc ||
1455 fi->fib_prefsrc == cfg->fc_prefsrc) &&
Thomas Graf4e902c52006-08-17 18:14:52 -07001456 (!cfg->fc_protocol ||
1457 fi->fib_protocol == cfg->fc_protocol) &&
1458 fib_nh_match(cfg, fi) == 0) {
Robert Olsson19baf832005-06-21 12:43:18 -07001459 fa_to_delete = fa;
1460 break;
1461 }
1462 }
1463
Olof Johansson91b9a272005-08-09 20:24:39 -07001464 if (!fa_to_delete)
1465 return -ESRCH;
Robert Olsson19baf832005-06-21 12:43:18 -07001466
Olof Johansson91b9a272005-08-09 20:24:39 -07001467 fa = fa_to_delete;
Thomas Graf4e902c52006-08-17 18:14:52 -07001468 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001469 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001470
Robert Olsson2373ce12005-08-25 13:01:29 -07001471 list_del_rcu(&fa->fa_list);
Robert Olsson19baf832005-06-21 12:43:18 -07001472
David S. Miller21d8c492011-04-14 14:49:37 -07001473 if (!plen)
1474 tb->tb_num_default--;
1475
Olof Johansson91b9a272005-08-09 20:24:39 -07001476 if (list_empty(fa_head)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001477 hlist_del_rcu(&li->hlist);
Olof Johansson91b9a272005-08-09 20:24:39 -07001478 free_leaf_info(li);
Robert Olsson2373ce12005-08-25 13:01:29 -07001479 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001480
1481 if (hlist_empty(&l->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001482 trie_leaf_remove(t, l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001483
1484 if (fa->fa_state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001485 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Olof Johansson91b9a272005-08-09 20:24:39 -07001486
Robert Olsson2373ce12005-08-25 13:01:29 -07001487 fib_release_info(fa->fa_info);
1488 alias_free_mem_rcu(fa);
Olof Johansson91b9a272005-08-09 20:24:39 -07001489 return 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001490}
1491
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001492static int trie_flush_list(struct list_head *head)
Robert Olsson19baf832005-06-21 12:43:18 -07001493{
1494 struct fib_alias *fa, *fa_node;
1495 int found = 0;
1496
1497 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1498 struct fib_info *fi = fa->fa_info;
Robert Olsson19baf832005-06-21 12:43:18 -07001499
Robert Olsson2373ce12005-08-25 13:01:29 -07001500 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1501 list_del_rcu(&fa->fa_list);
1502 fib_release_info(fa->fa_info);
1503 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001504 found++;
1505 }
1506 }
1507 return found;
1508}
1509
Alexander Duyckadaf9812014-12-31 10:55:47 -08001510static int trie_flush_leaf(struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001511{
1512 int found = 0;
1513 struct hlist_head *lih = &l->list;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001514 struct hlist_node *tmp;
Robert Olsson19baf832005-06-21 12:43:18 -07001515 struct leaf_info *li = NULL;
1516
Sasha Levinb67bfe02013-02-27 17:06:00 -08001517 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001518 found += trie_flush_list(&li->falh);
Robert Olsson19baf832005-06-21 12:43:18 -07001519
1520 if (list_empty(&li->falh)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001521 hlist_del_rcu(&li->hlist);
Robert Olsson19baf832005-06-21 12:43:18 -07001522 free_leaf_info(li);
1523 }
1524 }
1525 return found;
1526}
1527
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001528/*
1529 * Scan for the next rightward leaf, starting at the child of p that follows c.
1530 * Since we have parent back pointers, no recursion is necessary.
1531 */
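/*
 * Illustration (hypothetical child layout): if p has bits == 2 and its
 * children are { leaf A, NULL, tnode T, leaf B }, successive calls
 * starting from c == NULL yield A, then the leaves inside T in
 * depth-first order, and finally B.
 */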
Alexander Duyckadaf9812014-12-31 10:55:47 -08001532static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
Robert Olsson19baf832005-06-21 12:43:18 -07001533{
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001534 do {
Alexander Duycke9b44012014-12-31 10:56:12 -08001535		t_key idx = c ? get_index(c->key, p) + 1 : 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001536
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001537 while (idx < 1u << p->bits) {
1538 c = tnode_get_child_rcu(p, idx++);
Robert Olsson2373ce12005-08-25 13:01:29 -07001539 if (!c)
Olof Johansson91b9a272005-08-09 20:24:39 -07001540 continue;
Robert Olsson19baf832005-06-21 12:43:18 -07001541
Eric Dumazetaab515d2013-08-05 11:18:49 -07001542 if (IS_LEAF(c))
Alexander Duyckadaf9812014-12-31 10:55:47 -08001543 return c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001544
1545			/* Descend and restart the scan in the new node */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001546 p = c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001547 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001548 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001549
1550 /* Node empty, walk back up to parent */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001551 c = p;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001552 } while ((p = node_parent_rcu(c)) != NULL);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001553
1554 return NULL; /* Root of trie */
1555}
1556
Alexander Duyckadaf9812014-12-31 10:55:47 -08001557static struct tnode *trie_firstleaf(struct trie *t)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001558{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001559 struct tnode *n = rcu_dereference_rtnl(t->trie);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001560
1561 if (!n)
1562 return NULL;
1563
1564 if (IS_LEAF(n)) /* trie is just a leaf */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001565 return n;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001566
1567 return leaf_walk_rcu(n, NULL);
1568}
1569
Alexander Duyckadaf9812014-12-31 10:55:47 -08001570static struct tnode *trie_nextleaf(struct tnode *l)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001571{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001572 struct tnode *p = node_parent_rcu(l);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001573
1574 if (!p)
1575 return NULL; /* trie with just one leaf */
1576
Alexander Duyckadaf9812014-12-31 10:55:47 -08001577 return leaf_walk_rcu(p, l);
Robert Olsson19baf832005-06-21 12:43:18 -07001578}
1579
Alexander Duyckadaf9812014-12-31 10:55:47 -08001580static struct tnode *trie_leafindex(struct trie *t, int index)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001581{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001582 struct tnode *l = trie_firstleaf(t);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001583
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001584 while (l && index-- > 0)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001585 l = trie_nextleaf(l);
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001586
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001587 return l;
1588}
1589
1590
Robert Olssond562f1f2007-03-26 14:22:22 -07001591/*
1592 * Caller must hold RTNL.
1593 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001594int fib_table_flush(struct fib_table *tb)
Robert Olsson19baf832005-06-21 12:43:18 -07001595{
1596 struct trie *t = (struct trie *) tb->tb_data;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001597 struct tnode *l, *ll = NULL;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001598 int found = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001599
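	/* Removal of an emptied leaf is deferred by one iteration (via ll)
	 * because trie_nextleaf(l) must be evaluated while l is still
	 * linked into the trie.
	 */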
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001600 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001601 found += trie_flush_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001602
1603 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001604 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001605 ll = l;
1606 }
1607
1608 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001609 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001610
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001611 pr_debug("trie_flush found=%d\n", found);
Robert Olsson19baf832005-06-21 12:43:18 -07001612 return found;
1613}
1614
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001615void fib_free_table(struct fib_table *tb)
1616{
Alexander Duyck8274a972014-12-31 10:55:29 -08001617#ifdef CONFIG_IP_FIB_TRIE_STATS
1618 struct trie *t = (struct trie *)tb->tb_data;
1619
1620 free_percpu(t->stats);
1621#endif /* CONFIG_IP_FIB_TRIE_STATS */
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001622 kfree(tb);
1623}
1624
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001625static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1626 struct fib_table *tb,
Robert Olsson19baf832005-06-21 12:43:18 -07001627 struct sk_buff *skb, struct netlink_callback *cb)
1628{
1629 int i, s_i;
1630 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07001631 __be32 xkey = htonl(key);
Robert Olsson19baf832005-06-21 12:43:18 -07001632
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001633 s_i = cb->args[5];
Robert Olsson19baf832005-06-21 12:43:18 -07001634 i = 0;
1635
Robert Olsson2373ce12005-08-25 13:01:29 -07001636	/* rcu_read_lock is held by caller */
1637
1638 list_for_each_entry_rcu(fa, fah, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001639 if (i < s_i) {
1640 i++;
1641 continue;
1642 }
Robert Olsson19baf832005-06-21 12:43:18 -07001643
Eric W. Biederman15e47302012-09-07 20:12:54 +00001644 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
Robert Olsson19baf832005-06-21 12:43:18 -07001645 cb->nlh->nlmsg_seq,
1646 RTM_NEWROUTE,
1647 tb->tb_id,
1648 fa->fa_type,
Thomas Grafbe403ea2006-08-17 18:15:17 -07001649 xkey,
Robert Olsson19baf832005-06-21 12:43:18 -07001650 plen,
1651 fa->fa_tos,
Stephen Hemminger64347f72008-01-22 21:55:01 -08001652 fa->fa_info, NLM_F_MULTI) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001653 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001654 return -1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001655 }
Robert Olsson19baf832005-06-21 12:43:18 -07001656 i++;
1657 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001658 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001659 return skb->len;
1660}
1661
Alexander Duyckadaf9812014-12-31 10:55:47 -08001662static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001663 struct sk_buff *skb, struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001664{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001665 struct leaf_info *li;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001666 int i, s_i;
Robert Olsson19baf832005-06-21 12:43:18 -07001667
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001668 s_i = cb->args[4];
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001669 i = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001670
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001671	/* rcu_read_lock is held by caller */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001672 hlist_for_each_entry_rcu(li, &l->list, hlist) {
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001673 if (i < s_i) {
1674 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001675 continue;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001676 }
Robert Olsson19baf832005-06-21 12:43:18 -07001677
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001678 if (i > s_i)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001679 cb->args[5] = 0;
Olof Johansson91b9a272005-08-09 20:24:39 -07001680
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001681 if (list_empty(&li->falh))
Robert Olsson19baf832005-06-21 12:43:18 -07001682 continue;
1683
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001684 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001685 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001686 return -1;
1687 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001688 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001689 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001690
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001691 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001692 return skb->len;
1693}
1694
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001695int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1696 struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001697{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001698 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001699 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001700 t_key key = cb->args[2];
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001701 int count = cb->args[3];
Robert Olsson19baf832005-06-21 12:43:18 -07001702
Robert Olsson2373ce12005-08-25 13:01:29 -07001703 rcu_read_lock();
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001704 /* Dump starting at last key.
1705	 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1706 */
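	/* Resume state kept in the netlink callback (as used below):
	 *   cb->args[2] = key of the last dumped leaf
	 *   cb->args[3] = number of leaves dumped so far
	 *   cb->args[4] = position within the leaf's leaf_info list
	 *   cb->args[5] = position within the fib_alias list
	 */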
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001707 if (count == 0)
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001708 l = trie_firstleaf(t);
1709 else {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001710 /* Normally, continue from last key, but if that is missing
1711	 * fall back to a slow rescan.
1712 */
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001713 l = fib_find_node(t, key);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001714 if (!l)
1715 l = trie_leafindex(t, count);
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001716 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001717
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001718 while (l) {
1719 cb->args[2] = l->key;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001720 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001721 cb->args[3] = count;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001722 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001723 return -1;
Robert Olsson19baf832005-06-21 12:43:18 -07001724 }
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001725
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001726 ++count;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001727 l = trie_nextleaf(l);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001728 memset(&cb->args[4], 0,
1729 sizeof(cb->args) - 4*sizeof(cb->args[0]));
Robert Olsson19baf832005-06-21 12:43:18 -07001730 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001731 cb->args[3] = count;
Robert Olsson2373ce12005-08-25 13:01:29 -07001732 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001733
Robert Olsson19baf832005-06-21 12:43:18 -07001734 return skb->len;
Robert Olsson19baf832005-06-21 12:43:18 -07001735}
1736
David S. Miller5348ba82011-02-01 15:30:56 -08001737void __init fib_trie_init(void)
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001738{
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001739 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1740 sizeof(struct fib_alias),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001741 0, SLAB_PANIC, NULL);
1742
1743 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001744 max(sizeof(struct tnode),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001745 sizeof(struct leaf_info)),
1746 0, SLAB_PANIC, NULL);
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001747}
Robert Olsson19baf832005-06-21 12:43:18 -07001748
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001749
David S. Miller5348ba82011-02-01 15:30:56 -08001750struct fib_table *fib_trie_table(u32 id)
Robert Olsson19baf832005-06-21 12:43:18 -07001751{
1752 struct fib_table *tb;
1753 struct trie *t;
1754
Robert Olsson19baf832005-06-21 12:43:18 -07001755 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1756 GFP_KERNEL);
1757 if (tb == NULL)
1758 return NULL;
1759
1760 tb->tb_id = id;
Denis V. Lunev971b8932007-12-08 00:32:23 -08001761 tb->tb_default = -1;
David S. Miller21d8c492011-04-14 14:49:37 -07001762 tb->tb_num_default = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001763
1764 t = (struct trie *) tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001765 RCU_INIT_POINTER(t->trie, NULL);
1766#ifdef CONFIG_IP_FIB_TRIE_STATS
1767 t->stats = alloc_percpu(struct trie_use_stats);
1768 if (!t->stats) {
1769 kfree(tb);
1770 tb = NULL;
1771 }
1772#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001773
Robert Olsson19baf832005-06-21 12:43:18 -07001774 return tb;
1775}
1776
Robert Olsson19baf832005-06-21 12:43:18 -07001777#ifdef CONFIG_PROC_FS
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001778/* Depth first Trie walk iterator */
1779struct fib_trie_iter {
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001780 struct seq_net_private p;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001781 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001782 struct tnode *tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001783 unsigned int index;
1784 unsigned int depth;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001785};
Robert Olsson19baf832005-06-21 12:43:18 -07001786
Alexander Duyckadaf9812014-12-31 10:55:47 -08001787static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
Robert Olsson19baf832005-06-21 12:43:18 -07001788{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001789 struct tnode *tn = iter->tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001790 unsigned int cindex = iter->index;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001791 struct tnode *p;
1792
Eric W. Biederman6640e692007-01-24 14:42:04 -08001793 /* A single entry routing table */
1794 if (!tn)
1795 return NULL;
1796
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001797 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1798 iter->tnode, iter->index, iter->depth);
1799rescan:
1800 while (cindex < (1<<tn->bits)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001801 struct tnode *n = tnode_get_child_rcu(tn, cindex);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001802
1803 if (n) {
1804 if (IS_LEAF(n)) {
1805 iter->tnode = tn;
1806 iter->index = cindex + 1;
1807 } else {
1808 /* push down one level */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001809 iter->tnode = n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001810 iter->index = 0;
1811 ++iter->depth;
1812 }
1813 return n;
1814 }
1815
1816 ++cindex;
1817 }
1818
1819 /* Current node exhausted, pop back up */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001820 p = node_parent_rcu(tn);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001821 if (p) {
Alexander Duycke9b44012014-12-31 10:56:12 -08001822 cindex = get_index(tn->key, p) + 1;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001823 tn = p;
1824 --iter->depth;
1825 goto rescan;
1826 }
1827
1828 /* got root? */
Robert Olsson19baf832005-06-21 12:43:18 -07001829 return NULL;
1830}
1831
Alexander Duyckadaf9812014-12-31 10:55:47 -08001832static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001833 struct trie *t)
Robert Olsson19baf832005-06-21 12:43:18 -07001834{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001835 struct tnode *n;
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001836
Stephen Hemminger132adf52007-03-08 20:44:43 -08001837 if (!t)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001838 return NULL;
1839
1840 n = rcu_dereference(t->trie);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001841 if (!n)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001842 return NULL;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001843
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001844 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001845 iter->tnode = n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001846 iter->index = 0;
1847 iter->depth = 1;
1848 } else {
1849 iter->tnode = NULL;
1850 iter->index = 0;
1851 iter->depth = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001852 }
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001853
1854 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07001855}
1856
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001857static void trie_collect_stats(struct trie *t, struct trie_stat *s)
Robert Olsson19baf832005-06-21 12:43:18 -07001858{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001859 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001860 struct fib_trie_iter iter;
Robert Olsson19baf832005-06-21 12:43:18 -07001861
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001862 memset(s, 0, sizeof(*s));
Robert Olsson19baf832005-06-21 12:43:18 -07001863
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001864 rcu_read_lock();
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001865 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001866 if (IS_LEAF(n)) {
Stephen Hemminger93672292008-01-22 21:54:05 -08001867 struct leaf_info *li;
Stephen Hemminger93672292008-01-22 21:54:05 -08001868
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001869 s->leaves++;
1870 s->totdepth += iter.depth;
1871 if (iter.depth > s->maxdepth)
1872 s->maxdepth = iter.depth;
Stephen Hemminger93672292008-01-22 21:54:05 -08001873
Alexander Duyckadaf9812014-12-31 10:55:47 -08001874 hlist_for_each_entry_rcu(li, &n->list, hlist)
Stephen Hemminger93672292008-01-22 21:54:05 -08001875 ++s->prefixes;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001876 } else {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001877 int i;
Robert Olsson19baf832005-06-21 12:43:18 -07001878
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001879 s->tnodes++;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001880 if (n->bits < MAX_STAT_DEPTH)
1881 s->nodesizes[n->bits]++;
Robert Olsson06ef9212006-03-20 21:35:01 -08001882
Alexander Duyckadaf9812014-12-31 10:55:47 -08001883 for (i = 0; i < tnode_child_length(n); i++)
1884 if (!rcu_access_pointer(n->child[i]))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001885 s->nullpointers++;
1886 }
1887 }
1888 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001889}
1890
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001891/*
Robert Olsson19baf832005-06-21 12:43:18 -07001892 * This outputs /proc/net/fib_triestats
Robert Olsson19baf832005-06-21 12:43:18 -07001893 */
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001894static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
Robert Olsson19baf832005-06-21 12:43:18 -07001895{
Eric Dumazeta034ee32010-09-09 23:32:28 +00001896 unsigned int i, max, pointers, bytes, avdepth;
Robert Olsson19baf832005-06-21 12:43:18 -07001897
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001898 if (stat->leaves)
1899 avdepth = stat->totdepth*100 / stat->leaves;
1900 else
1901 avdepth = 0;
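	/* avdepth is the average depth scaled by 100 so it can be printed
	 * with two decimals below, e.g. totdepth 142 over 100 leaves gives
	 * 142, shown as "1.42".
	 */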
Robert Olsson19baf832005-06-21 12:43:18 -07001902
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001903 seq_printf(seq, "\tAver depth: %u.%02d\n",
1904 avdepth / 100, avdepth % 100);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001905 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
Robert Olsson19baf832005-06-21 12:43:18 -07001906
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001907 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
Alexander Duyckadaf9812014-12-31 10:55:47 -08001908 bytes = sizeof(struct tnode) * stat->leaves;
Stephen Hemminger93672292008-01-22 21:54:05 -08001909
1910 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
1911 bytes += sizeof(struct leaf_info) * stat->prefixes;
1912
Stephen Hemminger187b5182008-01-12 20:55:55 -08001913 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001914 bytes += sizeof(struct tnode) * stat->tnodes;
Robert Olsson19baf832005-06-21 12:43:18 -07001915
Robert Olsson06ef9212006-03-20 21:35:01 -08001916 max = MAX_STAT_DEPTH;
1917 while (max > 0 && stat->nodesizes[max-1] == 0)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001918 max--;
Robert Olsson19baf832005-06-21 12:43:18 -07001919
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001920 pointers = 0;
Jerry Snitselaarf585a992013-07-22 12:01:58 -07001921 for (i = 1; i < max; i++)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001922 if (stat->nodesizes[i] != 0) {
Stephen Hemminger187b5182008-01-12 20:55:55 -08001923 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001924 pointers += (1<<i) * stat->nodesizes[i];
1925 }
1926 seq_putc(seq, '\n');
Stephen Hemminger187b5182008-01-12 20:55:55 -08001927 seq_printf(seq, "\tPointers: %u\n", pointers);
Robert Olsson19baf832005-06-21 12:43:18 -07001928
Alexander Duyckadaf9812014-12-31 10:55:47 -08001929 bytes += sizeof(struct tnode *) * pointers;
Stephen Hemminger187b5182008-01-12 20:55:55 -08001930 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
1931 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001932}
Robert Olsson19baf832005-06-21 12:43:18 -07001933
1934#ifdef CONFIG_IP_FIB_TRIE_STATS
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001935static void trie_show_usage(struct seq_file *seq,
Alexander Duyck8274a972014-12-31 10:55:29 -08001936 const struct trie_use_stats __percpu *stats)
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001937{
Alexander Duyck8274a972014-12-31 10:55:29 -08001938 struct trie_use_stats s = { 0 };
1939 int cpu;
1940
1941 /* loop through all of the CPUs and gather up the stats */
1942 for_each_possible_cpu(cpu) {
1943 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
1944
1945 s.gets += pcpu->gets;
1946 s.backtrack += pcpu->backtrack;
1947 s.semantic_match_passed += pcpu->semantic_match_passed;
1948 s.semantic_match_miss += pcpu->semantic_match_miss;
1949 s.null_node_hit += pcpu->null_node_hit;
1950 s.resize_node_skipped += pcpu->resize_node_skipped;
1951 }
1952
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001953 seq_printf(seq, "\nCounters:\n---------\n");
Alexander Duyck8274a972014-12-31 10:55:29 -08001954 seq_printf(seq, "gets = %u\n", s.gets);
1955 seq_printf(seq, "backtracks = %u\n", s.backtrack);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001956 seq_printf(seq, "semantic match passed = %u\n",
Alexander Duyck8274a972014-12-31 10:55:29 -08001957 s.semantic_match_passed);
1958 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
1959 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
1960 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
Robert Olsson19baf832005-06-21 12:43:18 -07001961}
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001962#endif /* CONFIG_IP_FIB_TRIE_STATS */
1963
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001964static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001965{
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001966 if (tb->tb_id == RT_TABLE_LOCAL)
1967 seq_puts(seq, "Local:\n");
1968 else if (tb->tb_id == RT_TABLE_MAIN)
1969 seq_puts(seq, "Main:\n");
1970 else
1971 seq_printf(seq, "Id %d:\n", tb->tb_id);
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001972}
Robert Olsson19baf832005-06-21 12:43:18 -07001973
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001974
Robert Olsson19baf832005-06-21 12:43:18 -07001975static int fib_triestat_seq_show(struct seq_file *seq, void *v)
1976{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001977 struct net *net = (struct net *)seq->private;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001978 unsigned int h;
Eric W. Biederman877a9bf2007-12-07 00:47:47 -08001979
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001980 seq_printf(seq,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001981 "Basic info: size of leaf:"
1982 " %Zd bytes, size of tnode: %Zd bytes.\n",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001983 sizeof(struct tnode), sizeof(struct tnode));
Olof Johansson91b9a272005-08-09 20:24:39 -07001984
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001985 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1986 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001987 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001988
Sasha Levinb67bfe02013-02-27 17:06:00 -08001989 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001990 struct trie *t = (struct trie *) tb->tb_data;
1991 struct trie_stat stat;
1992
1993 if (!t)
1994 continue;
1995
1996 fib_table_print(seq, tb);
1997
1998 trie_collect_stats(t, &stat);
1999 trie_show_stats(seq, &stat);
2000#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08002001 trie_show_usage(seq, t->stats);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002002#endif
2003 }
2004 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002005
Robert Olsson19baf832005-06-21 12:43:18 -07002006 return 0;
2007}
2008
Robert Olsson19baf832005-06-21 12:43:18 -07002009static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2010{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07002011 return single_open_net(inode, file, fib_triestat_seq_show);
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002012}
2013
Arjan van de Ven9a321442007-02-12 00:55:35 -08002014static const struct file_operations fib_triestat_fops = {
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07002015 .owner = THIS_MODULE,
2016 .open = fib_triestat_seq_open,
2017 .read = seq_read,
2018 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07002019 .release = single_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002020};
2021
Alexander Duyckadaf9812014-12-31 10:55:47 -08002022static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
Robert Olsson19baf832005-06-21 12:43:18 -07002023{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002024 struct fib_trie_iter *iter = seq->private;
2025 struct net *net = seq_file_net(seq);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002026 loff_t idx = 0;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002027 unsigned int h;
Robert Olsson19baf832005-06-21 12:43:18 -07002028
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002029 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2030 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002031 struct fib_table *tb;
2032
Sasha Levinb67bfe02013-02-27 17:06:00 -08002033 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002034 struct tnode *n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002035
2036 for (n = fib_trie_get_first(iter,
2037 (struct trie *) tb->tb_data);
2038 n; n = fib_trie_get_next(iter))
2039 if (pos == idx++) {
2040 iter->tb = tb;
2041 return n;
2042 }
2043 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002044 }
Robert Olsson19baf832005-06-21 12:43:18 -07002045
Robert Olsson19baf832005-06-21 12:43:18 -07002046 return NULL;
2047}
2048
2049static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002050 __acquires(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002051{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002052 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002053 return fib_trie_get_idx(seq, *pos);
Robert Olsson19baf832005-06-21 12:43:18 -07002054}
2055
2056static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2057{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002058 struct fib_trie_iter *iter = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002059 struct net *net = seq_file_net(seq);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002060 struct fib_table *tb = iter->tb;
2061 struct hlist_node *tb_node;
2062 unsigned int h;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002063 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002064
Robert Olsson19baf832005-06-21 12:43:18 -07002065 ++*pos;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002066 /* next node in same table */
2067 n = fib_trie_get_next(iter);
2068 if (n)
2069 return n;
Olof Johansson91b9a272005-08-09 20:24:39 -07002070
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002071 /* walk rest of this hash chain */
2072 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
Eric Dumazet0a5c0472011-03-31 01:51:35 -07002073 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002074 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2075 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2076 if (n)
2077 goto found;
2078 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002079
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002080 /* new hash chain */
2081 while (++h < FIB_TABLE_HASHSZ) {
2082 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Sasha Levinb67bfe02013-02-27 17:06:00 -08002083 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002084 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2085 if (n)
2086 goto found;
2087 }
2088 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002089 return NULL;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002090
2091found:
2092 iter->tb = tb;
2093 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002094}
2095
2096static void fib_trie_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002097 __releases(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002098{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002099 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002100}
2101
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002102static void seq_indent(struct seq_file *seq, int n)
2103{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002104 while (n-- > 0)
2105 seq_puts(seq, " ");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002106}
Robert Olsson19baf832005-06-21 12:43:18 -07002107
Eric Dumazet28d36e32008-01-14 23:09:56 -08002108static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002109{
Stephen Hemminger132adf52007-03-08 20:44:43 -08002110 switch (s) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002111 case RT_SCOPE_UNIVERSE: return "universe";
2112 case RT_SCOPE_SITE: return "site";
2113 case RT_SCOPE_LINK: return "link";
2114 case RT_SCOPE_HOST: return "host";
2115 case RT_SCOPE_NOWHERE: return "nowhere";
2116 default:
Eric Dumazet28d36e32008-01-14 23:09:56 -08002117 snprintf(buf, len, "scope=%d", s);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002118 return buf;
2119 }
2120}
2121
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -07002122static const char *const rtn_type_names[__RTN_MAX] = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002123 [RTN_UNSPEC] = "UNSPEC",
2124 [RTN_UNICAST] = "UNICAST",
2125 [RTN_LOCAL] = "LOCAL",
2126 [RTN_BROADCAST] = "BROADCAST",
2127 [RTN_ANYCAST] = "ANYCAST",
2128 [RTN_MULTICAST] = "MULTICAST",
2129 [RTN_BLACKHOLE] = "BLACKHOLE",
2130 [RTN_UNREACHABLE] = "UNREACHABLE",
2131 [RTN_PROHIBIT] = "PROHIBIT",
2132 [RTN_THROW] = "THROW",
2133 [RTN_NAT] = "NAT",
2134 [RTN_XRESOLVE] = "XRESOLVE",
2135};
2136
Eric Dumazeta034ee32010-09-09 23:32:28 +00002137static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002138{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002139 if (t < __RTN_MAX && rtn_type_names[t])
2140 return rtn_type_names[t];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002141 snprintf(buf, len, "type %u", t);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002142 return buf;
2143}
2144
2145/* Pretty print the trie */
Robert Olsson19baf832005-06-21 12:43:18 -07002146static int fib_trie_seq_show(struct seq_file *seq, void *v)
2147{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002148 const struct fib_trie_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002149 struct tnode *n = v;
Robert Olsson19baf832005-06-21 12:43:18 -07002150
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002151 if (!node_parent_rcu(n))
2152 fib_table_print(seq, iter->tb);
Robert Olsson095b8502007-01-26 19:06:01 -08002153
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002154 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002155 __be32 prf = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002156
Alexander Duycke9b44012014-12-31 10:56:12 -08002157 seq_indent(seq, iter->depth-1);
2158 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2159 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2160 n->full_children, n->empty_children);
Olof Johansson91b9a272005-08-09 20:24:39 -07002161 } else {
Stephen Hemminger13280422008-01-22 21:54:37 -08002162 struct leaf_info *li;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002163 __be32 val = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002164
2165 seq_indent(seq, iter->depth);
Harvey Harrison673d57e2008-10-31 00:53:57 -07002166 seq_printf(seq, " |-- %pI4\n", &val);
Eric Dumazet28d36e32008-01-14 23:09:56 -08002167
Alexander Duyckadaf9812014-12-31 10:55:47 -08002168 hlist_for_each_entry_rcu(li, &n->list, hlist) {
Stephen Hemminger13280422008-01-22 21:54:37 -08002169 struct fib_alias *fa;
Eric Dumazet28d36e32008-01-14 23:09:56 -08002170
Stephen Hemminger13280422008-01-22 21:54:37 -08002171 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2172 char buf1[32], buf2[32];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002173
Stephen Hemminger13280422008-01-22 21:54:37 -08002174 seq_indent(seq, iter->depth+1);
2175 seq_printf(seq, " /%d %s %s", li->plen,
2176 rtn_scope(buf1, sizeof(buf1),
David S. Miller37e826c2011-03-24 18:06:47 -07002177 fa->fa_info->fib_scope),
Stephen Hemminger13280422008-01-22 21:54:37 -08002178 rtn_type(buf2, sizeof(buf2),
2179 fa->fa_type));
2180 if (fa->fa_tos)
Denis V. Lunevb9c4d822008-02-05 02:58:45 -08002181 seq_printf(seq, " tos=%d", fa->fa_tos);
Stephen Hemminger13280422008-01-22 21:54:37 -08002182 seq_putc(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002183 }
2184 }
Robert Olsson19baf832005-06-21 12:43:18 -07002185 }
2186
2187 return 0;
2188}
2189
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002190static const struct seq_operations fib_trie_seq_ops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002191 .start = fib_trie_seq_start,
2192 .next = fib_trie_seq_next,
2193 .stop = fib_trie_seq_stop,
2194 .show = fib_trie_seq_show,
Robert Olsson19baf832005-06-21 12:43:18 -07002195};
2196
2197static int fib_trie_seq_open(struct inode *inode, struct file *file)
2198{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002199 return seq_open_net(inode, file, &fib_trie_seq_ops,
2200 sizeof(struct fib_trie_iter));
Robert Olsson19baf832005-06-21 12:43:18 -07002201}
2202
Arjan van de Ven9a321442007-02-12 00:55:35 -08002203static const struct file_operations fib_trie_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002204 .owner = THIS_MODULE,
2205 .open = fib_trie_seq_open,
2206 .read = seq_read,
2207 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002208 .release = seq_release_net,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002209};
2210
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002211struct fib_route_iter {
2212 struct seq_net_private p;
2213 struct trie *main_trie;
2214 loff_t pos;
2215 t_key key;
2216};
2217
Alexander Duyckadaf9812014-12-31 10:55:47 -08002218static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002219{
Alexander Duyckadaf9812014-12-31 10:55:47 -08002220 struct tnode *l = NULL;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002221 struct trie *t = iter->main_trie;
2222
2223 /* use cache location of last found key */
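	/* iter->key/iter->pos remember the leaf reached on the previous
	 * pass, so a continued read can resume via fib_find_node()
	 * instead of rescanning from the first leaf.
	 */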
2224 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2225 pos -= iter->pos;
2226 else {
2227 iter->pos = 0;
2228 l = trie_firstleaf(t);
2229 }
2230
2231 while (l && pos-- > 0) {
2232 iter->pos++;
2233 l = trie_nextleaf(l);
2234 }
2235
2236 if (l)
2237 iter->key = pos; /* remember it */
2238 else
2239 iter->pos = 0; /* forget it */
2240
2241 return l;
2242}
2243
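/*
 * seq_file handlers for /proc/net/route. The whole walk runs under
 * rcu_read_lock(), taken in ->start and dropped in ->stop. A *pos of
 * zero returns SEQ_START_TOKEN so that ->show can emit the header line
 * before any route entries.
 */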
static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;

	rcu_read_lock();
	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_trie = (struct trie *) tb->tb_data;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	else
		return fib_route_get_idx(iter, *pos - 1);
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct tnode *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		iter->pos = 0;
		l = trie_firstleaf(iter->main_trie);
	} else {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;
	else
		iter->pos = 0;
	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

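/*
 * Translate a route's type, netmask and nexthop info into the RTF_* bits
 * reported in the Flags column of /proc/net/route: unreachable/prohibit
 * entries become RTF_REJECT, a nexthop gateway sets RTF_GATEWAY, a /32
 * mask sets RTF_HOST, and every entry carries RTF_UP.
 */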
static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	unsigned int flags = 0;

	if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
		flags = RTF_REJECT;
	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 * This outputs /proc/net/route.
 *
 * The format of the file is not supposed to change, and it must stay
 * identical to the old fib_hash output so that legacy user-space
 * utilities keep parsing it correctly.
 */
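/*
 * Each row is tab separated and padded to a width of 127 characters:
 *
 *   Iface  Destination  Gateway  Flags  RefCnt  Use  Metric  Mask  MTU  Window  IRTT
 *
 * Destination, Gateway and Mask are printed with %08X directly on the
 * __be32 values, which is why addresses typically appear byte-swapped on
 * little-endian machines; that is the representation the legacy parsers
 * expect.
 */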
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct tnode *l = v;
	struct leaf_info *li;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	hlist_for_each_entry_rcu(li, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);

			if (fa->fa_type == RTN_BROADCAST ||
			    fa->fa_type == RTN_MULTICAST)
				continue;

			seq_setwidth(seq, 127);

			if (fi)
				seq_printf(seq,
					   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u",
					   fi->fib_dev ? fi->fib_dev->name : "*",
					   prefix,
					   fi->fib_nh->nh_gw, flags, 0, 0,
					   fi->fib_priority,
					   mask,
					   (fi->fib_advmss ?
					    fi->fib_advmss + 40 : 0),
					   fi->fib_window,
					   fi->fib_rtt >> 3);
			else
				seq_printf(seq,
					   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u",
					   prefix, 0, flags, 0, 0, 0,
					   mask, 0, 0, 0);

			seq_pad(seq, '\n');
		}
	}

	return 0;
}
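/*
 * A minimal user-space sketch (not part of the kernel build) of how this
 * file is typically consumed: it simply streams the padded rows produced
 * above. The file name "dumproute.c" is just an example; compile it
 * separately, e.g. "cc -o dumproute dumproute.c".
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/route", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */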

static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner   = THIS_MODULE,
	.open    = fib_route_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

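/*
 * Register the three read-only /proc/net entries backed by the trie:
 * "fib_trie" (the structural dump above), "fib_triestat" (statistics,
 * implemented earlier in this file) and "route" (the legacy routing
 * table). On failure, everything already registered is unwound and
 * -ENOMEM is returned.
 */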
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
		goto out1;

	if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
			 &fib_triestat_fops))
		goto out2;

	if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
		goto out3;

	return 0;

out3:
	remove_proc_entry("fib_triestat", net->proc_net);
out2:
	remove_proc_entry("fib_trie", net->proc_net);
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	remove_proc_entry("fib_trie", net->proc_net);
	remove_proc_entry("fib_triestat", net->proc_net);
	remove_proc_entry("route", net->proc_net);
}

#endif /* CONFIG_PROC_FS */