/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)

#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)

struct tnode {
	t_key key;
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	struct tnode __rcu *parent;
	struct rcu_head rcu;
	union {
		/* The fields in this struct are valid if bits > 0 (TNODE) */
		struct {
			unsigned int full_children;  /* KEYLENGTH bits needed */
			unsigned int empty_children; /* KEYLENGTH bits needed */
			struct tnode __rcu *child[0];
		};
		/* This list pointer is valid if bits == 0 (LEAF) */
		struct hlist_head list;
	};
};

struct leaf_info {
	struct hlist_node hlist;
	int plen;
	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
	struct list_head falh;
	struct rcu_head rcu;
};

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct tnode __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static void tnode_put_child_reorg(struct tnode *tn, unsigned long i,
				  struct tnode *n, int wasfull);
static struct tnode *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct callback_head *tnode_free_head;
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/* caller must hold RTNL */
#define node_parent(n) rtnl_dereference((n)->parent)

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct tnode *n, struct tnode *tp)
{
	if (n)
		rcu_assign_pointer(n->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)

/* This provides us with the number of children in this node, in the case of a
 * leaf this will return 0 meaning none of the children are accessible.
 */
static inline unsigned long tnode_child_length(const struct tnode *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}
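
/* A small worked example of the helpers above (values chosen purely for
 * illustration): a leaf has bits == 0, so IS_LEAF() is true and
 * tnode_child_length() evaluates (1ul << 0) & ~1ul == 0, i.e. no
 * accessible children; an internal node with bits == 5 has IS_TNODE()
 * true and (1ul << 5) & ~1ul == 32 child slots.
 */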

/* caller must hold RTNL */
static inline struct tnode *tnode_get_child(const struct tnode *tn,
					    unsigned long i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rtnl_dereference(tn->child[i]);
}

/* caller must hold RCU read lock or RTNL */
static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
						unsigned long i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rcu_dereference_rtnl(tn->child[i]);
}

/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * if n is an internal node - a 'tnode' here, the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos + n->bits), are completely unknown
 * at this point.
 */
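
/* A worked instance of get_index(), reusing the illustrative numbers from
 * the figure above (tp->pos = 22, tp->bits = 3): a tnode key has all bits
 * below (pos + bits) cleared, so for a key whose prefix matches tp,
 * (key ^ tp->key) only keeps the bits below bit 25, and the shift by
 * tp->pos leaves the three "N" bits as the child index, e.g. "101" -> 5.
 * Had the key disagreed with tp anywhere at or above bit 25, that
 * difference would survive the shift and the index would no longer fit in
 * tp->bits - exactly the "index >> bits" mismatch test used by the lookup
 * and insert paths further down.
 */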

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

#define TNODE_KMALLOC_MAX \
	ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))

static void __node_free_rcu(struct rcu_head *head)
{
	struct tnode *n = container_of(head, struct tnode, rcu);

	if (IS_LEAF(n))
		kmem_cache_free(trie_leaf_kmem, n);
	else if (n->bits <= TNODE_KMALLOC_MAX)
		kfree(n);
	else
		vfree(n);
}

#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)

static inline void free_leaf_info(struct leaf_info *leaf)
{
	kfree_rcu(leaf, rcu);
}

static struct tnode *tnode_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->rcu.next = tnode_free_head;
	tnode_free_head = &tn->rcu;
}

static void tnode_free_flush(void)
{
	struct callback_head *head;

	while ((head = tnode_free_head)) {
		struct tnode *tn = container_of(head, struct tnode, rcu);

		tnode_free_head = head->next;
		tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);

		node_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}

static struct tnode *leaf_new(t_key key)
{
	struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (l) {
		l->parent = NULL;
		/* set key and pos to reflect full key value
		 * any trailing zeros in the key should be ignored
		 * as the nodes are searched
		 */
		l->key = key;
		l->pos = 0;
		/* set bits to 0 indicating we are not a tnode */
		l->bits = 0;

		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		li->mask_plen = ntohl(inet_make_mask(plen));
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}

static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	size_t sz = offsetof(struct tnode, child[1 << bits]);
	struct tnode *tn = tnode_alloc(sz);
	unsigned int shift = pos + bits;

	/* verify bits and pos their msb bits clear and values are valid */
	BUG_ON(!bits || (shift > KEYLENGTH));

	if (tn) {
		tn->parent = NULL;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
		 sizeof(struct tnode *) << bits);
	return tn;
}
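
/* Example of the key masking in tnode_new() (illustrative values only):
 * tnode_new(0xc0a80100, 8, 2) gives shift = 10, so the stored key is
 * (0xc0a80100 >> 10) << 10 == 0xc0a80000; everything below (pos + bits)
 * belongs to the children (their index bits plus whatever those skip) and
 * is therefore cleared in the tnode's own key.
 */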

/* Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
{
	return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}

static inline void put_child(struct tnode *tn, unsigned long i,
			     struct tnode *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}

/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */

static void tnode_put_child_reorg(struct tnode *tn, unsigned long i,
				  struct tnode *n, int wasfull)
{
	struct tnode *chi = rtnl_dereference(tn->child[i]);
	int isfull;

	BUG_ON(i >= tnode_child_length(tn));

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}

static void put_child_root(struct tnode *tp, struct trie *t,
			   t_key key, struct tnode *n)
{
	if (tp)
		put_child(tp, get_index(key, tp), n);
	else
		rcu_assign_pointer(t->trie, n);
}

#define MAX_WORK 10
static struct tnode *resize(struct trie *t, struct tnode *tn)
{
	struct tnode *old_tn, *n = NULL;
	int inflate_threshold_use;
	int halve_threshold_use;
	int max_work;

	if (!tn)
		return NULL;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children > (tnode_child_length(tn) - 1))
		goto no_children;

	/* One child */
	if (tn->empty_children == (tnode_child_length(tn) - 1))
		goto one_child;
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */

	/*
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it with
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *      new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *
	 * ...and so on, though it would mess up the while () loop.
	 *
	 * anyway,
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 *      inflate_threshold
	 *
	 * avoid a division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 *      inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >=
	 *      inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *    tn->empty_children) >= inflate_threshold *
	 *      tnode_child_length(tn)
	 *
	 */

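	/* A worked instance of the final inequality (numbers are only an
	 * illustration): for a non-root tnode with bits = 4, i.e. 16 slots,
	 * empty_children = 4 and full_children = 6, the left-hand side is
	 * 50 * (6 + 16 - 4) = 900 and the right-hand side is
	 * inflate_threshold (50) * 16 = 800, so the node gets doubled;
	 * with only 2 full children the left-hand side drops to 700 and no
	 * inflate takes place.
	 */
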
	/* Keep root node larger */

	if (!node_parent(tn)) {
		inflate_threshold_use = inflate_threshold_root;
		halve_threshold_use = halve_threshold_root;
	} else {
		inflate_threshold_use = inflate_threshold;
		halve_threshold_use = halve_threshold;
	}

	max_work = MAX_WORK;
	while ((tn->full_children > 0 && max_work-- &&
		50 * (tn->full_children + tnode_child_length(tn)
		      - tn->empty_children)
		>= inflate_threshold_use * tnode_child_length(tn))) {

		old_tn = tn;
		tn = inflate(t, tn);

		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}
	}

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return tn;

	/*
	 * Halve as long as the number of empty children in this
	 * node is above threshold.
	 */

	max_work = MAX_WORK;
	while (tn->bits > 1 && max_work-- &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       halve_threshold_use * tnode_child_length(tn)) {

		old_tn = tn;
		tn = halve(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}
	}


	/* Only one child remains */
	if (tn->empty_children == (tnode_child_length(tn) - 1)) {
		unsigned long i;
one_child:
		for (i = tnode_child_length(tn); !n && i;)
			n = tnode_get_child(tn, --i);
no_children:
		/* compress one level */
		node_set_parent(n, NULL);
		tnode_free_safe(tn);
		return n;
	}
	return tn;
}
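
/* Worked instance of the halving condition in resize() above (again just
 * illustrative numbers): a non-root tnode with bits = 4 (16 slots) and
 * empty_children = 13 gives 100 * (16 - 13) = 300 versus
 * halve_threshold (25) * 16 = 400, and 300 < 400, so the node is halved;
 * with only 9 empty slots the left-hand side becomes 700 and the node is
 * left alone.
 */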


static void tnode_clean_free(struct tnode *tn)
{
	struct tnode *tofree;
	unsigned long i;

	for (i = 0; i < tnode_child_length(tn); i++) {
		tofree = tnode_get_child(tn, i);
		if (tofree)
			node_free(tofree);
	}
	node_free(tn);
}

static struct tnode *inflate(struct trie *t, struct tnode *oldtnode)
{
	unsigned long olen = tnode_child_length(oldtnode);
	struct tnode *tn;
	unsigned long i;
	t_key m;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the old node and the
	 * inflate of the tnode is abandoned.
	 */
	for (i = 0, m = 1u << tn->pos; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);

		if (tnode_full(oldtnode, inode) && (inode->bits > 1)) {
			struct tnode *left, *right;

			left = tnode_new(inode->key & ~m, inode->pos,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key | m, inode->pos,
					  inode->bits - 1);

			if (!right) {
				node_free(left);
				goto nomem;
			}

			put_child(tn, 2*i, left);
			put_child(tn, 2*i+1, right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		unsigned long size, j;

		/* An empty child */
		if (inode == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
			put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each covering half of the
		 * original children. The "significant" part of the
		 * two new keys (see the discussion near the top of
		 * this file) will differ by one bit, which will be
		 * "0" in left's key and "1" in right's key. That bit
		 * is the highest index bit of 'inode', i.e. the bit
		 * at position (inode->pos + inode->bits - 1), which
		 * is also the new parent's tn->pos. So... we
		 * synthesize that bit in the two new keys.
		 * The mask 'm' computed in the loop above is a single
		 * "one" bit at exactly that position.
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		left = tnode_get_child(tn, 2*i);
		put_child(tn, 2*i, NULL);

		BUG_ON(!left);

		right = tnode_get_child(tn, 2*i+1);
		put_child(tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(left, j, rtnl_dereference(inode->child[j]));
			put_child(right, j, rtnl_dereference(inode->child[j + size]));
		}
		put_child(tn, 2*i, resize(t, left));
		put_child(tn, 2*i+1, resize(t, right));

		tnode_free_safe(inode);
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}
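
/* Rough sketch of what inflate() above does (an illustration, not taken
 * from the original comments): a tnode with pos = 10 and bits = 2
 * (4 slots) is rebuilt as one with pos = 9 and bits = 3 (8 slots).  A
 * full child with bits == 1 disappears entirely, its two children moving
 * straight into the new node (one trie level removed); a full child with
 * more bits is split into a left and a right half keyed on the bit at the
 * new node's pos, and its children are redistributed between the halves.
 */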

static struct tnode *halve(struct trie *t, struct tnode *oldtnode)
{
	unsigned long olen = tnode_child_length(oldtnode);
	struct tnode *tn, *left, *right;
	int i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the old node and the
	 * halve of the tnode is abandoned.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, oldtnode->pos, 1);

			if (!newn)
				goto nomem;

			put_child(tn, i/2, newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = tnode_get_child(tn, i/2);
		put_child(tn, i/2, NULL);
		put_child(newBinNode, 0, left);
		put_child(newBinNode, 1, right);
		put_child(tn, i/2, resize(t, newBinNode));
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

/* readside must use rcu_read_lock; currently that means the dump
 * routines via get_fa_head and dump */

static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct tnode *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_behind_rcu(&new->hlist, &last->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}

/* rcu_read_lock needs to be held by the caller on the read side */
static struct tnode *fib_find_node(struct trie *t, u32 key)
{
	struct tnode *n = rcu_dereference_rtnl(t->trie);

	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if !(index >> bits)
		 *     we know the value is cindex
		 * else
		 *     we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			return NULL;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			break;

		n = rcu_dereference_rtnl(n->child[index]);
	}

	return n;
}
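
/* Worked example of the "index >> n->bits" test used above (illustrative
 * values): say n->pos = 8 and n->bits = 4, so n->key has bits 0..11
 * cleared.  For a key matching n's prefix, key ^ n->key only has bits
 * 0..11 set, index = (key ^ n->key) >> 8 fits in 4 bits and
 * index >> 4 == 0, so the walk descends into child[index].  If the key
 * differs anywhere at or above bit 12, that difference survives the
 * shift, index >> 4 becomes non-zero and the lookup reports a miss
 * without comparing the skipped bits one at a time.
 */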

static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp;

	key = tn->key;

	while (tn != NULL && (tp = node_parent(tn)) != NULL) {
		cindex = get_index(key, tp);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = resize(t, tn);

		tnode_put_child_reorg(tp, cindex, tn, wasfull);

		tp = node_parent(tn);
		if (!tp)
			rcu_assign_pointer(t->trie, tn);

		tnode_free_flush();
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = resize(t, tn);

	rcu_assign_pointer(t->trie, tn);
	tnode_free_flush();
}

/* only used from updater-side */

static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	struct list_head *fa_head = NULL;
	struct tnode *l, *n, *tp = NULL;
	struct leaf_info *li;

	li = leaf_info_new(plen);
	if (!li)
		return NULL;
	fa_head = &li->falh;

	n = rtnl_dereference(t->trie);

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 *
	 * If we hit a node with a key that doesn't match then we should stop
	 * and create a new tnode to replace that node and insert ourselves
	 * and the other node into the new tnode.
	 */
	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if !(index >> bits)
		 *     we know the value is child index
		 * else
		 *     we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			break;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n)) {
			/* Case 1: n is a leaf, and prefixes match */
			insert_leaf_info(&n->list, li);
			return fa_head;
		}

		tp = n;
		n = rcu_dereference_rtnl(n->child[index]);
	}

	l = leaf_new(key);
	if (!l) {
		free_leaf_info(li);
		return NULL;
	}

	insert_leaf_info(&l->list, li);

	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 *  Add a new tnode here;
	 *  the first tnode needs some special handling and
	 *  leaves us in position for handling as case 3
	 */
	if (n) {
		struct tnode *tn;

		tn = tnode_new(key, __fls(key ^ n->key), 1);
		if (!tn) {
			free_leaf_info(li);
			node_free(l);
			return NULL;
		}

		/* initialize routes out of node */
		NODE_INIT_PARENT(tn, tp);
		put_child(tn, get_index(key, tn) ^ 1, n);

		/* start adding routes into the node */
		put_child_root(tp, t, key, tn);
		node_set_parent(n, tn);

		/* parent now has a NULL spot where the leaf can go */
		tp = tn;
	}

	/* Case 3: n is NULL, and will just insert a new leaf */
	if (tp) {
		NODE_INIT_PARENT(l, tp);
		put_child(tp, get_index(key, tp), l);
		trie_rebalance(t, tp);
	} else {
		rcu_assign_pointer(t->trie, l);
	}

	return fa_head;
}
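
/* Small example of the insertion path above (addresses chosen only as an
 * illustration): if the trie holds a leaf for 0x0a000000 (10.0.0.0) and a
 * route for 0x0a400000 (10.64.0.0) is added, key ^ n->key == 0x00400000,
 * __fls() of that is 22, and a tnode with pos = 22, bits = 1 is created;
 * the existing node lands in the child slot for "bit 22 == 0" and the new
 * leaf in the slot for "bit 22 == 1", so both prefixes stay reachable,
 * and trie_rebalance() then decides whether the new node should be
 * resized.
 */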
1013
Robert Olssond562f1f2007-03-26 14:22:22 -07001014/*
1015 * Caller must hold RTNL.
1016 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001017int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001018{
1019 struct trie *t = (struct trie *) tb->tb_data;
1020 struct fib_alias *fa, *new_fa;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001021 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001022 struct fib_info *fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001023 int plen = cfg->fc_dst_len;
1024 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001025 u32 key, mask;
1026 int err;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001027 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001028
1029 if (plen > 32)
1030 return -EINVAL;
1031
Thomas Graf4e902c52006-08-17 18:14:52 -07001032 key = ntohl(cfg->fc_dst);
Robert Olsson19baf832005-06-21 12:43:18 -07001033
Patrick McHardy2dfe55b2006-08-10 23:08:33 -07001034 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001035
Olof Johansson91b9a272005-08-09 20:24:39 -07001036 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001037
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001038 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001039 return -EINVAL;
1040
1041 key = key & mask;
1042
Thomas Graf4e902c52006-08-17 18:14:52 -07001043 fi = fib_create_info(cfg);
1044 if (IS_ERR(fi)) {
1045 err = PTR_ERR(fi);
Robert Olsson19baf832005-06-21 12:43:18 -07001046 goto err;
Thomas Graf4e902c52006-08-17 18:14:52 -07001047 }
Robert Olsson19baf832005-06-21 12:43:18 -07001048
1049 l = fib_find_node(t, key);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001050 fa = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001051
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001052 if (l) {
Robert Olsson19baf832005-06-21 12:43:18 -07001053 fa_head = get_fa_head(l, plen);
1054 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1055 }
1056
1057 /* Now fa, if non-NULL, points to the first fib alias
1058 * with the same keys [prefix,tos,priority], if such key already
1059 * exists or to the node before which we will insert new one.
1060 *
1061 * If fa is NULL, we will need to allocate a new one and
1062 * insert to the head of f.
1063 *
1064 * If f is NULL, no fib node matched the destination key
1065 * and we need to allocate a new one of those as well.
1066 */
1067
Julian Anastasov936f6f82008-01-28 21:18:06 -08001068 if (fa && fa->fa_tos == tos &&
1069 fa->fa_info->fib_priority == fi->fib_priority) {
1070 struct fib_alias *fa_first, *fa_match;
Robert Olsson19baf832005-06-21 12:43:18 -07001071
1072 err = -EEXIST;
Thomas Graf4e902c52006-08-17 18:14:52 -07001073 if (cfg->fc_nlflags & NLM_F_EXCL)
Robert Olsson19baf832005-06-21 12:43:18 -07001074 goto out;
1075
Julian Anastasov936f6f82008-01-28 21:18:06 -08001076 /* We have 2 goals:
1077 * 1. Find exact match for type, scope, fib_info to avoid
1078 * duplicate routes
1079 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1080 */
1081 fa_match = NULL;
1082 fa_first = fa;
1083 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1084 list_for_each_entry_continue(fa, fa_head, fa_list) {
1085 if (fa->fa_tos != tos)
1086 break;
1087 if (fa->fa_info->fib_priority != fi->fib_priority)
1088 break;
1089 if (fa->fa_type == cfg->fc_type &&
Julian Anastasov936f6f82008-01-28 21:18:06 -08001090 fa->fa_info == fi) {
1091 fa_match = fa;
1092 break;
1093 }
1094 }
1095
Thomas Graf4e902c52006-08-17 18:14:52 -07001096 if (cfg->fc_nlflags & NLM_F_REPLACE) {
Robert Olsson19baf832005-06-21 12:43:18 -07001097 struct fib_info *fi_drop;
1098 u8 state;
1099
Julian Anastasov936f6f82008-01-28 21:18:06 -08001100 fa = fa_first;
1101 if (fa_match) {
1102 if (fa == fa_match)
1103 err = 0;
Joonwoo Park67250332008-01-18 03:45:18 -08001104 goto out;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001105 }
Robert Olsson2373ce12005-08-25 13:01:29 -07001106 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001107 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -07001108 if (new_fa == NULL)
1109 goto out;
Robert Olsson19baf832005-06-21 12:43:18 -07001110
1111 fi_drop = fa->fa_info;
Robert Olsson2373ce12005-08-25 13:01:29 -07001112 new_fa->fa_tos = fa->fa_tos;
1113 new_fa->fa_info = fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001114 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001115 state = fa->fa_state;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001116 new_fa->fa_state = state & ~FA_S_ACCESSED;
Robert Olsson19baf832005-06-21 12:43:18 -07001117
Robert Olsson2373ce12005-08-25 13:01:29 -07001118 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1119 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001120
1121 fib_release_info(fi_drop);
1122 if (state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001123 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Milan Kocianb8f55832007-05-23 14:55:06 -07001124 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1125 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
Robert Olsson19baf832005-06-21 12:43:18 -07001126
Olof Johansson91b9a272005-08-09 20:24:39 -07001127 goto succeeded;
Robert Olsson19baf832005-06-21 12:43:18 -07001128 }
1129 /* Error if we find a perfect match which
1130 * uses the same scope, type, and nexthop
1131 * information.
1132 */
Julian Anastasov936f6f82008-01-28 21:18:06 -08001133 if (fa_match)
1134 goto out;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001135
Thomas Graf4e902c52006-08-17 18:14:52 -07001136 if (!(cfg->fc_nlflags & NLM_F_APPEND))
Julian Anastasov936f6f82008-01-28 21:18:06 -08001137 fa = fa_first;
Robert Olsson19baf832005-06-21 12:43:18 -07001138 }
1139 err = -ENOENT;
Thomas Graf4e902c52006-08-17 18:14:52 -07001140 if (!(cfg->fc_nlflags & NLM_F_CREATE))
Robert Olsson19baf832005-06-21 12:43:18 -07001141 goto out;
1142
1143 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001144 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson19baf832005-06-21 12:43:18 -07001145 if (new_fa == NULL)
1146 goto out;
1147
1148 new_fa->fa_info = fi;
1149 new_fa->fa_tos = tos;
Thomas Graf4e902c52006-08-17 18:14:52 -07001150 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001151 new_fa->fa_state = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001152	/*
1153	 * Insert the new entry into the list.
1154	 */
1155
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001156 if (!fa_head) {
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001157 fa_head = fib_insert_node(t, key, plen);
1158 if (unlikely(!fa_head)) {
1159 err = -ENOMEM;
Robert Olssonf835e472005-06-28 15:00:39 -07001160 goto out_free_new_fa;
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001161 }
Robert Olssonf835e472005-06-28 15:00:39 -07001162 }
Robert Olsson19baf832005-06-21 12:43:18 -07001163
David S. Miller21d8c492011-04-14 14:49:37 -07001164 if (!plen)
1165 tb->tb_num_default++;
1166
Robert Olsson2373ce12005-08-25 13:01:29 -07001167 list_add_tail_rcu(&new_fa->fa_list,
1168 (fa ? &fa->fa_list : fa_head));
Robert Olsson19baf832005-06-21 12:43:18 -07001169
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001170 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Thomas Graf4e902c52006-08-17 18:14:52 -07001171 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001172 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001173succeeded:
1174 return 0;
Robert Olssonf835e472005-06-28 15:00:39 -07001175
1176out_free_new_fa:
1177 kmem_cache_free(fn_alias_kmem, new_fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001178out:
1179 fib_release_info(fi);
Olof Johansson91b9a272005-08-09 20:24:39 -07001180err:
Robert Olsson19baf832005-06-21 12:43:18 -07001181 return err;
1182}
1183
Robert Olsson772cb712005-09-19 15:31:18 -07001184/* should be called with rcu_read_lock held */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001185static int check_leaf(struct fib_table *tb, struct trie *t, struct tnode *l,
David S. Miller22bd5b92011-03-11 19:54:08 -05001186 t_key key, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001187 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001188{
Robert Olsson19baf832005-06-21 12:43:18 -07001189 struct leaf_info *li;
1190 struct hlist_head *hhead = &l->list;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001191
Sasha Levinb67bfe02013-02-27 17:06:00 -08001192 hlist_for_each_entry_rcu(li, hhead, hlist) {
David S. Miller3be06862011-03-07 15:01:10 -08001193 struct fib_alias *fa;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001194
Eric Dumazet5c745012011-07-18 03:16:33 +00001195 if (l->key != (key & li->mask_plen))
Robert Olsson19baf832005-06-21 12:43:18 -07001196 continue;
1197
David S. Miller3be06862011-03-07 15:01:10 -08001198 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1199 struct fib_info *fi = fa->fa_info;
1200 int nhsel, err;
1201
David S. Miller22bd5b92011-03-11 19:54:08 -05001202 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
David S. Miller3be06862011-03-07 15:01:10 -08001203 continue;
David S. Millerdccd9ecc2012-05-10 22:16:32 -04001204 if (fi->fib_dead)
1205 continue;
David S. Miller37e826c2011-03-24 18:06:47 -07001206 if (fa->fa_info->fib_scope < flp->flowi4_scope)
David S. Miller3be06862011-03-07 15:01:10 -08001207 continue;
1208 fib_alias_accessed(fa);
1209 err = fib_props[fa->fa_type].error;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001210 if (unlikely(err < 0)) {
David S. Miller3be06862011-03-07 15:01:10 -08001211#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001212 this_cpu_inc(t->stats->semantic_match_passed);
David S. Miller3be06862011-03-07 15:01:10 -08001213#endif
Julian Anastasov1fbc7842011-03-25 20:33:23 -07001214 return err;
David S. Miller3be06862011-03-07 15:01:10 -08001215 }
1216 if (fi->fib_flags & RTNH_F_DEAD)
1217 continue;
1218 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1219 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1220
1221 if (nh->nh_flags & RTNH_F_DEAD)
1222 continue;
David S. Miller22bd5b92011-03-11 19:54:08 -05001223 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
David S. Miller3be06862011-03-07 15:01:10 -08001224 continue;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001225
Robert Olsson19baf832005-06-21 12:43:18 -07001226#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001227 this_cpu_inc(t->stats->semantic_match_passed);
Robert Olsson19baf832005-06-21 12:43:18 -07001228#endif
Eric Dumazet5c745012011-07-18 03:16:33 +00001229 res->prefixlen = li->plen;
David S. Miller3be06862011-03-07 15:01:10 -08001230 res->nh_sel = nhsel;
1231 res->type = fa->fa_type;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001232 res->scope = fi->fib_scope;
David S. Miller3be06862011-03-07 15:01:10 -08001233 res->fi = fi;
1234 res->table = tb;
1235 res->fa_head = &li->falh;
1236 if (!(fib_flags & FIB_LOOKUP_NOREF))
Eric Dumazet5c745012011-07-18 03:16:33 +00001237 atomic_inc(&fi->fib_clntref);
David S. Miller3be06862011-03-07 15:01:10 -08001238 return 0;
1239 }
1240 }
1241
1242#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001243 this_cpu_inc(t->stats->semantic_match_miss);
David S. Miller3be06862011-03-07 15:01:10 -08001244#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001245 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001246
Ben Hutchings2e655572008-07-10 16:52:52 -07001247 return 1;
Robert Olsson19baf832005-06-21 12:43:18 -07001248}
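/* Worked example, added for illustration (the addresses are made-up): for a
 * stored prefix 192.168.1.0/24 the leaf key is 0xc0a80100 and li->mask_plen
 * is 0xffffff00.  A lookup key of 0xc0a80107 (192.168.1.7) satisfies
 * l->key == (key & li->mask_plen), so its aliases are walked; a lookup key
 * of 0xc0a80207 (192.168.2.7) fails that test and the leaf_info is skipped
 * before any alias is examined.
 */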
1249
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001250static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1251{
1252 t_key prefix = n->key;
1253
1254 return (key ^ prefix) & (prefix | -prefix);
1255}
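/* Illustrative sketch, added; not part of the original source.  A standalone
 * userspace model of prefix_mismatch() on plain 32-bit keys, kept under
 * "#if 0" so it does not affect the build.  (prefix | -prefix) is a mask
 * covering the lowest set bit of the node key and every bit above it, so the
 * AND reports any disagreement between key and prefix in that region.  The
 * addresses are made-up example values.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t prefix_mismatch32(uint32_t key, uint32_t prefix)
{
	return (key ^ prefix) & (prefix | -prefix);
}

int main(void)
{
	uint32_t prefix = 0xc0a80100;	/* node key 192.168.1.0 */

	/* 192.168.1.7: (0x7 & 0xffffff00) == 0, no mismatch */
	printf("%#x\n", (unsigned)prefix_mismatch32(0xc0a80107, prefix));
	/* 192.168.2.7: (0x307 & 0xffffff00) == 0x300, mismatch */
	printf("%#x\n", (unsigned)prefix_mismatch32(0xc0a80207, prefix));
	return 0;
}
#endif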
1256
David S. Miller22bd5b92011-03-11 19:54:08 -05001257int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001258 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001259{
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001260 struct trie *t = (struct trie *)tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001261#ifdef CONFIG_IP_FIB_TRIE_STATS
1262 struct trie_use_stats __percpu *stats = t->stats;
1263#endif
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001264 const t_key key = ntohl(flp->daddr);
1265 struct tnode *n, *pn;
1266 t_key cindex;
1267 int ret = 1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001268
Robert Olsson2373ce12005-08-25 13:01:29 -07001269 rcu_read_lock();
Robert Olsson19baf832005-06-21 12:43:18 -07001270
Robert Olsson2373ce12005-08-25 13:01:29 -07001271 n = rcu_dereference(t->trie);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001272 if (!n)
Robert Olsson19baf832005-06-21 12:43:18 -07001273 goto failed;
1274
1275#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001276 this_cpu_inc(stats->gets);
Robert Olsson19baf832005-06-21 12:43:18 -07001277#endif
1278
Alexander Duyckadaf9812014-12-31 10:55:47 -08001279 pn = n;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001280 cindex = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001281
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001282 /* Step 1: Travel to the longest prefix match in the trie */
1283 for (;;) {
1284 unsigned long index = get_index(key, n);
Robert Olsson19baf832005-06-21 12:43:18 -07001285
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001286		/* This bit of code is a bit tricky but it combines multiple
1287		 * checks into a single check.  The node key consists of the
1288		 * prefix it represents followed by zeros in the "bits"
1289		 * positions and below.  The index is that key XORed with the
1290		 * lookup key, shifted down by "pos".  From this we derive:
1291		 *   if !(index >> bits)
1292		 *     the prefix matched and the value is the child index
1293		 *   else
1294		 *     we have a mismatch in the skipped bits and failed
1295		 */
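		/* Worked example, added for illustration (the values are
		 * hypothetical): for a node with key 0xc0a80000, pos 8 and
		 * bits 2, a lookup key of 0xc0a80280 gives
		 * index = (0xc0a80280 ^ 0xc0a80000) >> 8 = 0x2, and
		 * 0x2 >> 2 == 0, so child slot 2 is followed.  A lookup key
		 * of 0xc0a90280 gives index 0x102; 0x102 >> 2 is non-zero,
		 * so the skipped bits do not match and the loop breaks.
		 */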
1296 if (index >> n->bits)
1297 break;
Robert Olsson19baf832005-06-21 12:43:18 -07001298
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001299 /* we have found a leaf. Prefixes have already been compared */
1300 if (IS_LEAF(n))
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001301 goto found;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001302
1303 /* only record pn and cindex if we are going to be chopping
1304 * bits later. Otherwise we are just wasting cycles.
1305 */
1306 if (index) {
1307 pn = n;
1308 cindex = index;
Olof Johansson91b9a272005-08-09 20:24:39 -07001309 }
1310
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001311 n = rcu_dereference(n->child[index]);
1312 if (unlikely(!n))
Robert Olsson19baf832005-06-21 12:43:18 -07001313 goto backtrace;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001314 }
1315
1316 /* Step 2: Sort out leaves and begin backtracing for longest prefix */
1317 for (;;) {
1318 /* record the pointer where our next node pointer is stored */
1319 struct tnode __rcu **cptr = n->child;
1320
1321		/* This test verifies that none of the bits that differ
1322		 * between the key and the prefix are at or above the least
1323		 * significant set bit of the prefix.
1324		 */
1325 if (unlikely(prefix_mismatch(key, n)))
1326 goto backtrace;
1327
1328 /* exit out and process leaf */
1329 if (unlikely(IS_LEAF(n)))
1330 break;
1331
1332 /* Don't bother recording parent info. Since we are in
1333 * prefix match mode we will have to come back to wherever
1334 * we started this traversal anyway
1335 */
1336
1337 while ((n = rcu_dereference(*cptr)) == NULL) {
1338backtrace:
1339#ifdef CONFIG_IP_FIB_TRIE_STATS
1340 if (!n)
1341 this_cpu_inc(stats->null_node_hit);
1342#endif
1343 /* If we are at cindex 0 there are no more bits for
1344 * us to strip at this level so we must ascend back
1345 * up one level to see if there are any more bits to
1346 * be stripped there.
1347 */
1348 while (!cindex) {
1349 t_key pkey = pn->key;
1350
1351 pn = node_parent_rcu(pn);
1352 if (unlikely(!pn))
1353 goto failed;
1354#ifdef CONFIG_IP_FIB_TRIE_STATS
1355 this_cpu_inc(stats->backtrack);
1356#endif
1357 /* Get Child's index */
1358 cindex = get_index(pkey, pn);
1359 }
1360
1361 /* strip the least significant bit from the cindex */
1362 cindex &= cindex - 1;
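			/* Worked example, added for illustration: a cindex of
			 * 0x6 (0b0110) is retried as 0x4 and then 0x0; each
			 * step clears the lowest set bit and so falls back
			 * toward children that can hold shorter prefixes
			 * within this node before ascending to the parent.
			 */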
1363
1364 /* grab pointer for next child node */
1365 cptr = &pn->child[cindex];
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001366 }
Robert Olsson19baf832005-06-21 12:43:18 -07001367 }
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001368
Robert Olsson19baf832005-06-21 12:43:18 -07001369found:
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001370 /* Step 3: Process the leaf, if that fails fall back to backtracing */
1371 ret = check_leaf(tb, t, n, key, flp, res, fib_flags);
1372 if (unlikely(ret > 0))
1373 goto backtrace;
1374failed:
Robert Olsson2373ce12005-08-25 13:01:29 -07001375 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001376 return ret;
1377}
Florian Westphal6fc01432011-08-25 13:46:12 +02001378EXPORT_SYMBOL_GPL(fib_table_lookup);
Robert Olsson19baf832005-06-21 12:43:18 -07001379
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001380/*
1381 * Remove the leaf and rebalance its parent.
1382 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001383static void trie_leaf_remove(struct trie *t, struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001384{
Alexander Duyck64c9b6f2014-12-31 10:55:35 -08001385 struct tnode *tp = node_parent(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001386
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001387 pr_debug("entering trie_leaf_remove(%p)\n", l);
Robert Olsson19baf832005-06-21 12:43:18 -07001388
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001389 if (tp) {
Alexander Duyck836a0122014-12-31 10:56:06 -08001390 put_child(tp, get_index(l->key, tp), NULL);
Jarek Poplawski7b855762009-06-18 00:28:51 -07001391 trie_rebalance(t, tp);
Alexander Duyck836a0122014-12-31 10:56:06 -08001392 } else {
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001393 RCU_INIT_POINTER(t->trie, NULL);
Alexander Duyck836a0122014-12-31 10:56:06 -08001394 }
Robert Olsson19baf832005-06-21 12:43:18 -07001395
Alexander Duyck37fd30f2014-12-31 10:55:41 -08001396 node_free(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001397}
1398
Robert Olssond562f1f2007-03-26 14:22:22 -07001399/*
1400 * Caller must hold RTNL.
1401 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001402int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001403{
1404 struct trie *t = (struct trie *) tb->tb_data;
1405 u32 key, mask;
Thomas Graf4e902c52006-08-17 18:14:52 -07001406 int plen = cfg->fc_dst_len;
1407 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001408 struct fib_alias *fa, *fa_to_delete;
1409 struct list_head *fa_head;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001410 struct tnode *l;
Olof Johansson91b9a272005-08-09 20:24:39 -07001411 struct leaf_info *li;
1412
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001413 if (plen > 32)
Robert Olsson19baf832005-06-21 12:43:18 -07001414 return -EINVAL;
1415
Thomas Graf4e902c52006-08-17 18:14:52 -07001416 key = ntohl(cfg->fc_dst);
Olof Johansson91b9a272005-08-09 20:24:39 -07001417 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001418
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001419 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001420 return -EINVAL;
1421
1422 key = key & mask;
1423 l = fib_find_node(t, key);
1424
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001425 if (!l)
Robert Olsson19baf832005-06-21 12:43:18 -07001426 return -ESRCH;
1427
Igor Maravicad5b3102012-08-13 10:26:08 +02001428 li = find_leaf_info(l, plen);
1429
1430 if (!li)
1431 return -ESRCH;
1432
1433 fa_head = &li->falh;
Robert Olsson19baf832005-06-21 12:43:18 -07001434 fa = fib_find_alias(fa_head, tos, 0);
1435
1436 if (!fa)
1437 return -ESRCH;
1438
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001439 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
Robert Olsson19baf832005-06-21 12:43:18 -07001440
1441 fa_to_delete = NULL;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001442 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1443 list_for_each_entry_continue(fa, fa_head, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001444 struct fib_info *fi = fa->fa_info;
1445
1446 if (fa->fa_tos != tos)
1447 break;
1448
Thomas Graf4e902c52006-08-17 18:14:52 -07001449 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1450 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
David S. Miller37e826c2011-03-24 18:06:47 -07001451 fa->fa_info->fib_scope == cfg->fc_scope) &&
Julian Anastasov74cb3c12011-03-19 12:13:46 +00001452 (!cfg->fc_prefsrc ||
1453 fi->fib_prefsrc == cfg->fc_prefsrc) &&
Thomas Graf4e902c52006-08-17 18:14:52 -07001454 (!cfg->fc_protocol ||
1455 fi->fib_protocol == cfg->fc_protocol) &&
1456 fib_nh_match(cfg, fi) == 0) {
Robert Olsson19baf832005-06-21 12:43:18 -07001457 fa_to_delete = fa;
1458 break;
1459 }
1460 }
1461
Olof Johansson91b9a272005-08-09 20:24:39 -07001462 if (!fa_to_delete)
1463 return -ESRCH;
Robert Olsson19baf832005-06-21 12:43:18 -07001464
Olof Johansson91b9a272005-08-09 20:24:39 -07001465 fa = fa_to_delete;
Thomas Graf4e902c52006-08-17 18:14:52 -07001466 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001467 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001468
Robert Olsson2373ce12005-08-25 13:01:29 -07001469 list_del_rcu(&fa->fa_list);
Robert Olsson19baf832005-06-21 12:43:18 -07001470
David S. Miller21d8c492011-04-14 14:49:37 -07001471 if (!plen)
1472 tb->tb_num_default--;
1473
Olof Johansson91b9a272005-08-09 20:24:39 -07001474 if (list_empty(fa_head)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001475 hlist_del_rcu(&li->hlist);
Olof Johansson91b9a272005-08-09 20:24:39 -07001476 free_leaf_info(li);
Robert Olsson2373ce12005-08-25 13:01:29 -07001477 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001478
1479 if (hlist_empty(&l->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001480 trie_leaf_remove(t, l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001481
1482 if (fa->fa_state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001483 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Olof Johansson91b9a272005-08-09 20:24:39 -07001484
Robert Olsson2373ce12005-08-25 13:01:29 -07001485 fib_release_info(fa->fa_info);
1486 alias_free_mem_rcu(fa);
Olof Johansson91b9a272005-08-09 20:24:39 -07001487 return 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001488}
1489
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001490static int trie_flush_list(struct list_head *head)
Robert Olsson19baf832005-06-21 12:43:18 -07001491{
1492 struct fib_alias *fa, *fa_node;
1493 int found = 0;
1494
1495 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1496 struct fib_info *fi = fa->fa_info;
Robert Olsson19baf832005-06-21 12:43:18 -07001497
Robert Olsson2373ce12005-08-25 13:01:29 -07001498 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1499 list_del_rcu(&fa->fa_list);
1500 fib_release_info(fa->fa_info);
1501 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001502 found++;
1503 }
1504 }
1505 return found;
1506}
1507
Alexander Duyckadaf9812014-12-31 10:55:47 -08001508static int trie_flush_leaf(struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001509{
1510 int found = 0;
1511 struct hlist_head *lih = &l->list;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001512 struct hlist_node *tmp;
Robert Olsson19baf832005-06-21 12:43:18 -07001513 struct leaf_info *li = NULL;
1514
Sasha Levinb67bfe02013-02-27 17:06:00 -08001515 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001516 found += trie_flush_list(&li->falh);
Robert Olsson19baf832005-06-21 12:43:18 -07001517
1518 if (list_empty(&li->falh)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001519 hlist_del_rcu(&li->hlist);
Robert Olsson19baf832005-06-21 12:43:18 -07001520 free_leaf_info(li);
1521 }
1522 }
1523 return found;
1524}
1525
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001526/*
1527 * Scan for the next leaf to the right, starting at node p->child[idx].
1528 * Since we have a back pointer, no recursion is necessary.
1529 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001530static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
Robert Olsson19baf832005-06-21 12:43:18 -07001531{
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001532 do {
Alexander Duyck98293e82014-12-31 10:56:18 -08001533		unsigned long idx = c ? get_index(c->key, p) + 1 : 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001534
Alexander Duyck98293e82014-12-31 10:56:18 -08001535 while (idx < tnode_child_length(p)) {
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001536 c = tnode_get_child_rcu(p, idx++);
Robert Olsson2373ce12005-08-25 13:01:29 -07001537 if (!c)
Olof Johansson91b9a272005-08-09 20:24:39 -07001538 continue;
Robert Olsson19baf832005-06-21 12:43:18 -07001539
Eric Dumazetaab515d2013-08-05 11:18:49 -07001540 if (IS_LEAF(c))
Alexander Duyckadaf9812014-12-31 10:55:47 -08001541 return c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001542
1543			/* Rescan: start scanning in the new node */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001544 p = c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001545 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001546 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001547
1548 /* Node empty, walk back up to parent */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001549 c = p;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001550 } while ((p = node_parent_rcu(c)) != NULL);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001551
1552 return NULL; /* Root of trie */
1553}
1554
Alexander Duyckadaf9812014-12-31 10:55:47 -08001555static struct tnode *trie_firstleaf(struct trie *t)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001556{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001557 struct tnode *n = rcu_dereference_rtnl(t->trie);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001558
1559 if (!n)
1560 return NULL;
1561
1562 if (IS_LEAF(n)) /* trie is just a leaf */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001563 return n;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001564
1565 return leaf_walk_rcu(n, NULL);
1566}
1567
Alexander Duyckadaf9812014-12-31 10:55:47 -08001568static struct tnode *trie_nextleaf(struct tnode *l)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001569{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001570 struct tnode *p = node_parent_rcu(l);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001571
1572 if (!p)
1573 return NULL; /* trie with just one leaf */
1574
Alexander Duyckadaf9812014-12-31 10:55:47 -08001575 return leaf_walk_rcu(p, l);
Robert Olsson19baf832005-06-21 12:43:18 -07001576}
1577
Alexander Duyckadaf9812014-12-31 10:55:47 -08001578static struct tnode *trie_leafindex(struct trie *t, int index)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001579{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001580 struct tnode *l = trie_firstleaf(t);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001581
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001582 while (l && index-- > 0)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001583 l = trie_nextleaf(l);
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001584
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001585 return l;
1586}
1587
1588
Robert Olssond562f1f2007-03-26 14:22:22 -07001589/*
1590 * Caller must hold RTNL.
1591 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001592int fib_table_flush(struct fib_table *tb)
Robert Olsson19baf832005-06-21 12:43:18 -07001593{
1594 struct trie *t = (struct trie *) tb->tb_data;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001595 struct tnode *l, *ll = NULL;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001596 int found = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001597
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001598 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001599 found += trie_flush_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001600
1601 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001602 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001603 ll = l;
1604 }
1605
1606 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001607 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001608
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001609 pr_debug("trie_flush found=%d\n", found);
Robert Olsson19baf832005-06-21 12:43:18 -07001610 return found;
1611}
1612
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001613void fib_free_table(struct fib_table *tb)
1614{
Alexander Duyck8274a972014-12-31 10:55:29 -08001615#ifdef CONFIG_IP_FIB_TRIE_STATS
1616 struct trie *t = (struct trie *)tb->tb_data;
1617
1618 free_percpu(t->stats);
1619#endif /* CONFIG_IP_FIB_TRIE_STATS */
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001620 kfree(tb);
1621}
1622
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001623static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1624 struct fib_table *tb,
Robert Olsson19baf832005-06-21 12:43:18 -07001625 struct sk_buff *skb, struct netlink_callback *cb)
1626{
1627 int i, s_i;
1628 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07001629 __be32 xkey = htonl(key);
Robert Olsson19baf832005-06-21 12:43:18 -07001630
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001631 s_i = cb->args[5];
Robert Olsson19baf832005-06-21 12:43:18 -07001632 i = 0;
1633
Robert Olsson2373ce12005-08-25 13:01:29 -07001634	/* rcu_read_lock is held by the caller */
1635
1636 list_for_each_entry_rcu(fa, fah, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001637 if (i < s_i) {
1638 i++;
1639 continue;
1640 }
Robert Olsson19baf832005-06-21 12:43:18 -07001641
Eric W. Biederman15e47302012-09-07 20:12:54 +00001642 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
Robert Olsson19baf832005-06-21 12:43:18 -07001643 cb->nlh->nlmsg_seq,
1644 RTM_NEWROUTE,
1645 tb->tb_id,
1646 fa->fa_type,
Thomas Grafbe403ea2006-08-17 18:15:17 -07001647 xkey,
Robert Olsson19baf832005-06-21 12:43:18 -07001648 plen,
1649 fa->fa_tos,
Stephen Hemminger64347f72008-01-22 21:55:01 -08001650 fa->fa_info, NLM_F_MULTI) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001651 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001652 return -1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001653 }
Robert Olsson19baf832005-06-21 12:43:18 -07001654 i++;
1655 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001656 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001657 return skb->len;
1658}
1659
Alexander Duyckadaf9812014-12-31 10:55:47 -08001660static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001661 struct sk_buff *skb, struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001662{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001663 struct leaf_info *li;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001664 int i, s_i;
Robert Olsson19baf832005-06-21 12:43:18 -07001665
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001666 s_i = cb->args[4];
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001667 i = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001668
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001669	/* rcu_read_lock is held by the caller */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001670 hlist_for_each_entry_rcu(li, &l->list, hlist) {
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001671 if (i < s_i) {
1672 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001673 continue;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001674 }
Robert Olsson19baf832005-06-21 12:43:18 -07001675
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001676 if (i > s_i)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001677 cb->args[5] = 0;
Olof Johansson91b9a272005-08-09 20:24:39 -07001678
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001679 if (list_empty(&li->falh))
Robert Olsson19baf832005-06-21 12:43:18 -07001680 continue;
1681
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001682 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001683 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001684 return -1;
1685 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001686 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001687 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001688
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001689 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001690 return skb->len;
1691}
1692
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001693int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1694 struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001695{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001696 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001697 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001698 t_key key = cb->args[2];
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001699 int count = cb->args[3];
Robert Olsson19baf832005-06-21 12:43:18 -07001700
Robert Olsson2373ce12005-08-25 13:01:29 -07001701 rcu_read_lock();
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001702	/* Dump starting at the last key.
1703	 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1704	 */
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001705 if (count == 0)
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001706 l = trie_firstleaf(t);
1707 else {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001708		/* Normally, continue from the last key, but if that is
1709		 * missing fall back to a slow rescan
1710		 */
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001711 l = fib_find_node(t, key);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001712 if (!l)
1713 l = trie_leafindex(t, count);
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001714 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001715
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001716 while (l) {
1717 cb->args[2] = l->key;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001718 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001719 cb->args[3] = count;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001720 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001721 return -1;
Robert Olsson19baf832005-06-21 12:43:18 -07001722 }
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001723
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001724 ++count;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001725 l = trie_nextleaf(l);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001726 memset(&cb->args[4], 0,
1727 sizeof(cb->args) - 4*sizeof(cb->args[0]));
Robert Olsson19baf832005-06-21 12:43:18 -07001728 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001729 cb->args[3] = count;
Robert Olsson2373ce12005-08-25 13:01:29 -07001730 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001731
Robert Olsson19baf832005-06-21 12:43:18 -07001732 return skb->len;
Robert Olsson19baf832005-06-21 12:43:18 -07001733}
1734
David S. Miller5348ba82011-02-01 15:30:56 -08001735void __init fib_trie_init(void)
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001736{
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001737 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1738 sizeof(struct fib_alias),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001739 0, SLAB_PANIC, NULL);
1740
1741 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001742 max(sizeof(struct tnode),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001743 sizeof(struct leaf_info)),
1744 0, SLAB_PANIC, NULL);
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001745}
Robert Olsson19baf832005-06-21 12:43:18 -07001746
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001747
David S. Miller5348ba82011-02-01 15:30:56 -08001748struct fib_table *fib_trie_table(u32 id)
Robert Olsson19baf832005-06-21 12:43:18 -07001749{
1750 struct fib_table *tb;
1751 struct trie *t;
1752
Robert Olsson19baf832005-06-21 12:43:18 -07001753 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1754 GFP_KERNEL);
1755 if (tb == NULL)
1756 return NULL;
1757
1758 tb->tb_id = id;
Denis V. Lunev971b8932007-12-08 00:32:23 -08001759 tb->tb_default = -1;
David S. Miller21d8c492011-04-14 14:49:37 -07001760 tb->tb_num_default = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001761
1762 t = (struct trie *) tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001763 RCU_INIT_POINTER(t->trie, NULL);
1764#ifdef CONFIG_IP_FIB_TRIE_STATS
1765 t->stats = alloc_percpu(struct trie_use_stats);
1766 if (!t->stats) {
1767 kfree(tb);
1768 tb = NULL;
1769 }
1770#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001771
Robert Olsson19baf832005-06-21 12:43:18 -07001772 return tb;
1773}
1774
Robert Olsson19baf832005-06-21 12:43:18 -07001775#ifdef CONFIG_PROC_FS
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001776/* Depth first Trie walk iterator */
1777struct fib_trie_iter {
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001778 struct seq_net_private p;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001779 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001780 struct tnode *tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001781 unsigned int index;
1782 unsigned int depth;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001783};
Robert Olsson19baf832005-06-21 12:43:18 -07001784
Alexander Duyckadaf9812014-12-31 10:55:47 -08001785static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
Robert Olsson19baf832005-06-21 12:43:18 -07001786{
Alexander Duyck98293e82014-12-31 10:56:18 -08001787 unsigned long cindex = iter->index;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001788 struct tnode *tn = iter->tnode;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001789 struct tnode *p;
1790
Eric W. Biederman6640e692007-01-24 14:42:04 -08001791 /* A single entry routing table */
1792 if (!tn)
1793 return NULL;
1794
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001795 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1796 iter->tnode, iter->index, iter->depth);
1797rescan:
Alexander Duyck98293e82014-12-31 10:56:18 -08001798 while (cindex < tnode_child_length(tn)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001799 struct tnode *n = tnode_get_child_rcu(tn, cindex);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001800
1801 if (n) {
1802 if (IS_LEAF(n)) {
1803 iter->tnode = tn;
1804 iter->index = cindex + 1;
1805 } else {
1806 /* push down one level */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001807 iter->tnode = n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001808 iter->index = 0;
1809 ++iter->depth;
1810 }
1811 return n;
1812 }
1813
1814 ++cindex;
1815 }
1816
1817 /* Current node exhausted, pop back up */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001818 p = node_parent_rcu(tn);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001819 if (p) {
Alexander Duycke9b44012014-12-31 10:56:12 -08001820 cindex = get_index(tn->key, p) + 1;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001821 tn = p;
1822 --iter->depth;
1823 goto rescan;
1824 }
1825
1826 /* got root? */
Robert Olsson19baf832005-06-21 12:43:18 -07001827 return NULL;
1828}
1829
Alexander Duyckadaf9812014-12-31 10:55:47 -08001830static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001831 struct trie *t)
Robert Olsson19baf832005-06-21 12:43:18 -07001832{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001833 struct tnode *n;
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001834
Stephen Hemminger132adf52007-03-08 20:44:43 -08001835 if (!t)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001836 return NULL;
1837
1838 n = rcu_dereference(t->trie);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001839 if (!n)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001840 return NULL;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001841
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001842 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001843 iter->tnode = n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001844 iter->index = 0;
1845 iter->depth = 1;
1846 } else {
1847 iter->tnode = NULL;
1848 iter->index = 0;
1849 iter->depth = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001850 }
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001851
1852 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07001853}
1854
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001855static void trie_collect_stats(struct trie *t, struct trie_stat *s)
Robert Olsson19baf832005-06-21 12:43:18 -07001856{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001857 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001858 struct fib_trie_iter iter;
Robert Olsson19baf832005-06-21 12:43:18 -07001859
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001860 memset(s, 0, sizeof(*s));
Robert Olsson19baf832005-06-21 12:43:18 -07001861
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001862 rcu_read_lock();
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001863 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001864 if (IS_LEAF(n)) {
Stephen Hemminger93672292008-01-22 21:54:05 -08001865 struct leaf_info *li;
Stephen Hemminger93672292008-01-22 21:54:05 -08001866
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001867 s->leaves++;
1868 s->totdepth += iter.depth;
1869 if (iter.depth > s->maxdepth)
1870 s->maxdepth = iter.depth;
Stephen Hemminger93672292008-01-22 21:54:05 -08001871
Alexander Duyckadaf9812014-12-31 10:55:47 -08001872 hlist_for_each_entry_rcu(li, &n->list, hlist)
Stephen Hemminger93672292008-01-22 21:54:05 -08001873 ++s->prefixes;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001874 } else {
Alexander Duyck98293e82014-12-31 10:56:18 -08001875 unsigned long i;
Robert Olsson19baf832005-06-21 12:43:18 -07001876
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001877 s->tnodes++;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001878 if (n->bits < MAX_STAT_DEPTH)
1879 s->nodesizes[n->bits]++;
Robert Olsson06ef9212006-03-20 21:35:01 -08001880
Alexander Duyck98293e82014-12-31 10:56:18 -08001881 for (i = 0; i < tnode_child_length(n); i++) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001882 if (!rcu_access_pointer(n->child[i]))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001883 s->nullpointers++;
Alexander Duyck98293e82014-12-31 10:56:18 -08001884 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001885 }
1886 }
1887 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001888}
1889
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001890/*
Robert Olsson19baf832005-06-21 12:43:18 -07001891 * This outputs /proc/net/fib_triestats
Robert Olsson19baf832005-06-21 12:43:18 -07001892 */
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001893static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
Robert Olsson19baf832005-06-21 12:43:18 -07001894{
Eric Dumazeta034ee32010-09-09 23:32:28 +00001895 unsigned int i, max, pointers, bytes, avdepth;
Robert Olsson19baf832005-06-21 12:43:18 -07001896
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001897 if (stat->leaves)
1898 avdepth = stat->totdepth*100 / stat->leaves;
1899 else
1900 avdepth = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001901
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001902 seq_printf(seq, "\tAver depth: %u.%02d\n",
1903 avdepth / 100, avdepth % 100);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001904 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
Robert Olsson19baf832005-06-21 12:43:18 -07001905
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001906 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
Alexander Duyckadaf9812014-12-31 10:55:47 -08001907 bytes = sizeof(struct tnode) * stat->leaves;
Stephen Hemminger93672292008-01-22 21:54:05 -08001908
1909 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
1910 bytes += sizeof(struct leaf_info) * stat->prefixes;
1911
Stephen Hemminger187b5182008-01-12 20:55:55 -08001912 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001913 bytes += sizeof(struct tnode) * stat->tnodes;
Robert Olsson19baf832005-06-21 12:43:18 -07001914
Robert Olsson06ef9212006-03-20 21:35:01 -08001915 max = MAX_STAT_DEPTH;
1916 while (max > 0 && stat->nodesizes[max-1] == 0)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001917 max--;
Robert Olsson19baf832005-06-21 12:43:18 -07001918
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001919 pointers = 0;
Jerry Snitselaarf585a992013-07-22 12:01:58 -07001920 for (i = 1; i < max; i++)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001921 if (stat->nodesizes[i] != 0) {
Stephen Hemminger187b5182008-01-12 20:55:55 -08001922 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001923 pointers += (1<<i) * stat->nodesizes[i];
1924 }
1925 seq_putc(seq, '\n');
Stephen Hemminger187b5182008-01-12 20:55:55 -08001926 seq_printf(seq, "\tPointers: %u\n", pointers);
Robert Olsson19baf832005-06-21 12:43:18 -07001927
Alexander Duyckadaf9812014-12-31 10:55:47 -08001928 bytes += sizeof(struct tnode *) * pointers;
Stephen Hemminger187b5182008-01-12 20:55:55 -08001929 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
1930 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001931}
Robert Olsson19baf832005-06-21 12:43:18 -07001932
1933#ifdef CONFIG_IP_FIB_TRIE_STATS
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001934static void trie_show_usage(struct seq_file *seq,
Alexander Duyck8274a972014-12-31 10:55:29 -08001935 const struct trie_use_stats __percpu *stats)
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001936{
Alexander Duyck8274a972014-12-31 10:55:29 -08001937 struct trie_use_stats s = { 0 };
1938 int cpu;
1939
1940 /* loop through all of the CPUs and gather up the stats */
1941 for_each_possible_cpu(cpu) {
1942 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
1943
1944 s.gets += pcpu->gets;
1945 s.backtrack += pcpu->backtrack;
1946 s.semantic_match_passed += pcpu->semantic_match_passed;
1947 s.semantic_match_miss += pcpu->semantic_match_miss;
1948 s.null_node_hit += pcpu->null_node_hit;
1949 s.resize_node_skipped += pcpu->resize_node_skipped;
1950 }
1951
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001952 seq_printf(seq, "\nCounters:\n---------\n");
Alexander Duyck8274a972014-12-31 10:55:29 -08001953 seq_printf(seq, "gets = %u\n", s.gets);
1954 seq_printf(seq, "backtracks = %u\n", s.backtrack);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001955 seq_printf(seq, "semantic match passed = %u\n",
Alexander Duyck8274a972014-12-31 10:55:29 -08001956 s.semantic_match_passed);
1957 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
1958 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
1959 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
Robert Olsson19baf832005-06-21 12:43:18 -07001960}
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001961#endif /* CONFIG_IP_FIB_TRIE_STATS */
1962
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001963static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001964{
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001965 if (tb->tb_id == RT_TABLE_LOCAL)
1966 seq_puts(seq, "Local:\n");
1967 else if (tb->tb_id == RT_TABLE_MAIN)
1968 seq_puts(seq, "Main:\n");
1969 else
1970 seq_printf(seq, "Id %d:\n", tb->tb_id);
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001971}
Robert Olsson19baf832005-06-21 12:43:18 -07001972
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001973
Robert Olsson19baf832005-06-21 12:43:18 -07001974static int fib_triestat_seq_show(struct seq_file *seq, void *v)
1975{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001976 struct net *net = (struct net *)seq->private;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001977 unsigned int h;
Eric W. Biederman877a9bf2007-12-07 00:47:47 -08001978
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001979 seq_printf(seq,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001980 "Basic info: size of leaf:"
1981 " %Zd bytes, size of tnode: %Zd bytes.\n",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001982 sizeof(struct tnode), sizeof(struct tnode));
Olof Johansson91b9a272005-08-09 20:24:39 -07001983
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001984 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1985 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001986 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001987
Sasha Levinb67bfe02013-02-27 17:06:00 -08001988 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001989 struct trie *t = (struct trie *) tb->tb_data;
1990 struct trie_stat stat;
1991
1992 if (!t)
1993 continue;
1994
1995 fib_table_print(seq, tb);
1996
1997 trie_collect_stats(t, &stat);
1998 trie_show_stats(seq, &stat);
1999#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08002000 trie_show_usage(seq, t->stats);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002001#endif
2002 }
2003 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002004
Robert Olsson19baf832005-06-21 12:43:18 -07002005 return 0;
2006}
2007
Robert Olsson19baf832005-06-21 12:43:18 -07002008static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2009{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07002010 return single_open_net(inode, file, fib_triestat_seq_show);
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002011}
2012
Arjan van de Ven9a321442007-02-12 00:55:35 -08002013static const struct file_operations fib_triestat_fops = {
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07002014 .owner = THIS_MODULE,
2015 .open = fib_triestat_seq_open,
2016 .read = seq_read,
2017 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07002018 .release = single_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002019};
2020
Alexander Duyckadaf9812014-12-31 10:55:47 -08002021static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
Robert Olsson19baf832005-06-21 12:43:18 -07002022{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002023 struct fib_trie_iter *iter = seq->private;
2024 struct net *net = seq_file_net(seq);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002025 loff_t idx = 0;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002026 unsigned int h;
Robert Olsson19baf832005-06-21 12:43:18 -07002027
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002028 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2029 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002030 struct fib_table *tb;
2031
Sasha Levinb67bfe02013-02-27 17:06:00 -08002032 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002033 struct tnode *n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002034
2035 for (n = fib_trie_get_first(iter,
2036 (struct trie *) tb->tb_data);
2037 n; n = fib_trie_get_next(iter))
2038 if (pos == idx++) {
2039 iter->tb = tb;
2040 return n;
2041 }
2042 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002043 }
Robert Olsson19baf832005-06-21 12:43:18 -07002044
Robert Olsson19baf832005-06-21 12:43:18 -07002045 return NULL;
2046}
2047
2048static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002049 __acquires(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002050{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002051 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002052 return fib_trie_get_idx(seq, *pos);
Robert Olsson19baf832005-06-21 12:43:18 -07002053}
2054
2055static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2056{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002057 struct fib_trie_iter *iter = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002058 struct net *net = seq_file_net(seq);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002059 struct fib_table *tb = iter->tb;
2060 struct hlist_node *tb_node;
2061 unsigned int h;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002062 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002063
Robert Olsson19baf832005-06-21 12:43:18 -07002064 ++*pos;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002065 /* next node in same table */
2066 n = fib_trie_get_next(iter);
2067 if (n)
2068 return n;
Olof Johansson91b9a272005-08-09 20:24:39 -07002069
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002070 /* walk rest of this hash chain */
2071 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
Eric Dumazet0a5c0472011-03-31 01:51:35 -07002072 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002073 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2074 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2075 if (n)
2076 goto found;
2077 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002078
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002079 /* new hash chain */
2080 while (++h < FIB_TABLE_HASHSZ) {
2081 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Sasha Levinb67bfe02013-02-27 17:06:00 -08002082 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002083 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2084 if (n)
2085 goto found;
2086 }
2087 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002088 return NULL;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002089
2090found:
2091 iter->tb = tb;
2092 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002093}
2094
2095static void fib_trie_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002096 __releases(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002097{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002098 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002099}
2100
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002101static void seq_indent(struct seq_file *seq, int n)
2102{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002103 while (n-- > 0)
2104 seq_puts(seq, " ");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002105}
Robert Olsson19baf832005-06-21 12:43:18 -07002106
Eric Dumazet28d36e32008-01-14 23:09:56 -08002107static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002108{
Stephen Hemminger132adf52007-03-08 20:44:43 -08002109 switch (s) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002110 case RT_SCOPE_UNIVERSE: return "universe";
2111 case RT_SCOPE_SITE: return "site";
2112 case RT_SCOPE_LINK: return "link";
2113 case RT_SCOPE_HOST: return "host";
2114 case RT_SCOPE_NOWHERE: return "nowhere";
2115 default:
Eric Dumazet28d36e32008-01-14 23:09:56 -08002116 snprintf(buf, len, "scope=%d", s);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002117 return buf;
2118 }
2119}
2120
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -07002121static const char *const rtn_type_names[__RTN_MAX] = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002122 [RTN_UNSPEC] = "UNSPEC",
2123 [RTN_UNICAST] = "UNICAST",
2124 [RTN_LOCAL] = "LOCAL",
2125 [RTN_BROADCAST] = "BROADCAST",
2126 [RTN_ANYCAST] = "ANYCAST",
2127 [RTN_MULTICAST] = "MULTICAST",
2128 [RTN_BLACKHOLE] = "BLACKHOLE",
2129 [RTN_UNREACHABLE] = "UNREACHABLE",
2130 [RTN_PROHIBIT] = "PROHIBIT",
2131 [RTN_THROW] = "THROW",
2132 [RTN_NAT] = "NAT",
2133 [RTN_XRESOLVE] = "XRESOLVE",
2134};
2135
Eric Dumazeta034ee32010-09-09 23:32:28 +00002136static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002137{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002138 if (t < __RTN_MAX && rtn_type_names[t])
2139 return rtn_type_names[t];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002140 snprintf(buf, len, "type %u", t);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002141 return buf;
2142}
2143
2144/* Pretty print the trie */
Robert Olsson19baf832005-06-21 12:43:18 -07002145static int fib_trie_seq_show(struct seq_file *seq, void *v)
2146{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002147 const struct fib_trie_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002148 struct tnode *n = v;
Robert Olsson19baf832005-06-21 12:43:18 -07002149
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002150 if (!node_parent_rcu(n))
2151 fib_table_print(seq, iter->tb);
Robert Olsson095b8502007-01-26 19:06:01 -08002152
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002153 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002154 __be32 prf = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002155
Alexander Duycke9b44012014-12-31 10:56:12 -08002156 seq_indent(seq, iter->depth-1);
2157 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2158 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2159 n->full_children, n->empty_children);
Olof Johansson91b9a272005-08-09 20:24:39 -07002160 } else {
Stephen Hemminger13280422008-01-22 21:54:37 -08002161 struct leaf_info *li;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002162 __be32 val = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002163
2164 seq_indent(seq, iter->depth);
Harvey Harrison673d57e2008-10-31 00:53:57 -07002165 seq_printf(seq, " |-- %pI4\n", &val);
Eric Dumazet28d36e32008-01-14 23:09:56 -08002166
Alexander Duyckadaf9812014-12-31 10:55:47 -08002167 hlist_for_each_entry_rcu(li, &n->list, hlist) {
Stephen Hemminger13280422008-01-22 21:54:37 -08002168 struct fib_alias *fa;
Eric Dumazet28d36e32008-01-14 23:09:56 -08002169
Stephen Hemminger13280422008-01-22 21:54:37 -08002170 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2171 char buf1[32], buf2[32];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002172
Stephen Hemminger13280422008-01-22 21:54:37 -08002173 seq_indent(seq, iter->depth+1);
2174 seq_printf(seq, " /%d %s %s", li->plen,
2175 rtn_scope(buf1, sizeof(buf1),
David S. Miller37e826c2011-03-24 18:06:47 -07002176 fa->fa_info->fib_scope),
Stephen Hemminger13280422008-01-22 21:54:37 -08002177 rtn_type(buf2, sizeof(buf2),
2178 fa->fa_type));
2179 if (fa->fa_tos)
Denis V. Lunevb9c4d822008-02-05 02:58:45 -08002180 seq_printf(seq, " tos=%d", fa->fa_tos);
Stephen Hemminger13280422008-01-22 21:54:37 -08002181 seq_putc(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002182 }
2183 }
Robert Olsson19baf832005-06-21 12:43:18 -07002184 }
2185
2186 return 0;
2187}
2188
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002189static const struct seq_operations fib_trie_seq_ops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002190 .start = fib_trie_seq_start,
2191 .next = fib_trie_seq_next,
2192 .stop = fib_trie_seq_stop,
2193 .show = fib_trie_seq_show,
Robert Olsson19baf832005-06-21 12:43:18 -07002194};
2195
2196static int fib_trie_seq_open(struct inode *inode, struct file *file)
2197{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002198 return seq_open_net(inode, file, &fib_trie_seq_ops,
2199 sizeof(struct fib_trie_iter));
Robert Olsson19baf832005-06-21 12:43:18 -07002200}
2201
Arjan van de Ven9a321442007-02-12 00:55:35 -08002202static const struct file_operations fib_trie_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002203 .owner = THIS_MODULE,
2204 .open = fib_trie_seq_open,
2205 .read = seq_read,
2206 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002207 .release = seq_release_net,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002208};
2209
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002210struct fib_route_iter {
2211 struct seq_net_private p;
2212 struct trie *main_trie;
2213 loff_t pos;
2214 t_key key;
2215};
2216
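/*
 * Return the leaf at position 'pos', starting from the cached leaf in
 * 'iter' when the requested position lies at or beyond it, otherwise
 * from the first leaf of the trie.
 */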
Alexander Duyckadaf9812014-12-31 10:55:47 -08002217static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002218{
Alexander Duyckadaf9812014-12-31 10:55:47 -08002219 struct tnode *l = NULL;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002220 struct trie *t = iter->main_trie;
2221
2222 /* use cache location of last found key */
2223 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2224 pos -= iter->pos;
2225 else {
2226 iter->pos = 0;
2227 l = trie_firstleaf(t);
2228 }
2229
2230 while (l && pos-- > 0) {
2231 iter->pos++;
2232 l = trie_nextleaf(l);
2233 }
2234
2235 if (l)
2236 iter->key = l->key; /* remember the leaf's key, not the residual walk position */
2237 else
2238 iter->pos = 0; /* forget it */
2239
2240 return l;
2241}
2242
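/*
 * seq_file hooks for /proc/net/route.  The walk runs under the RCU read
 * lock; SEQ_START_TOKEN stands for the header line, and positions >= 1
 * map to the leaves of the main table's trie.
 */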
2243static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2244 __acquires(RCU)
2245{
2246 struct fib_route_iter *iter = seq->private;
2247 struct fib_table *tb;
2248
2249 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002250 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002251 if (!tb)
2252 return NULL;
2253
2254 iter->main_trie = (struct trie *) tb->tb_data;
2255 if (*pos == 0)
2256 return SEQ_START_TOKEN;
2257
2258 return fib_route_get_idx(iter, *pos - 1);
2259}
2260
2261static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2262{
2263 struct fib_route_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002264 struct tnode *l = v;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002265
2266 ++*pos;
2267 if (v == SEQ_START_TOKEN) {
2268 iter->pos = 0;
2269 l = trie_firstleaf(iter->main_trie);
2270 } else {
2271 iter->pos++;
2272 l = trie_nextleaf(l);
2273 }
2274
2275 if (l)
2276 iter->key = l->key;
2277 else
2278 iter->pos = 0;
2279 return l;
2280}
2281
2282static void fib_route_seq_stop(struct seq_file *seq, void *v)
2283 __releases(RCU)
2284{
2285 rcu_read_unlock();
2286}
2287
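/*
 * Build the RTF_* flag word reported in the Flags column: RTF_REJECT for
 * unreachable/prohibit routes, RTF_GATEWAY when a gateway is set,
 * RTF_HOST for /32 destinations, and RTF_UP always.
 */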
Eric Dumazeta034ee32010-09-09 23:32:28 +00002288static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002289{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002290 unsigned int flags = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002291
Eric Dumazeta034ee32010-09-09 23:32:28 +00002292 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2293 flags = RTF_REJECT;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002294 if (fi && fi->fib_nh->nh_gw)
2295 flags |= RTF_GATEWAY;
Al Viro32ab5f82006-09-26 22:21:45 -07002296 if (mask == htonl(0xFFFFFFFF))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002297 flags |= RTF_HOST;
2298 flags |= RTF_UP;
2299 return flags;
2300}
2301
2302/*
2303 * This outputs /proc/net/route.
2304 * The format of the file is not supposed to be changed
Eric Dumazeta034ee32010-09-09 23:32:28 +00002305 * and needs to be the same as the fib_hash output to avoid
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002306 * breaking legacy utilities.
2307 */
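/*
 * Illustrative layout only (the routes below are made-up examples):
 *
 * Iface	Destination	Gateway 	Flags	RefCnt	Use	Metric	Mask		MTU	Window	IRTT
 * eth0	00010A0A	00000000	0001	0	0	0	00FFFFFF	0	0	0
 * eth0	00000000	010A0A0A	0003	0	0	0	00000000	0	0	0
 *
 * Destination, Gateway and Mask are the network-byte-order addresses
 * printed with %08X, so on a little-endian host 10.10.1.0 reads as
 * 00010A0A.  Every record is padded to 127 characters before the
 * trailing newline.
 */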
2308static int fib_route_seq_show(struct seq_file *seq, void *v)
2309{
Alexander Duyckadaf9812014-12-31 10:55:47 -08002310 struct tnode *l = v;
Stephen Hemminger13280422008-01-22 21:54:37 -08002311 struct leaf_info *li;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002312
2313 if (v == SEQ_START_TOKEN) {
2314 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2315 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2316 "\tWindow\tIRTT");
2317 return 0;
2318 }
2319
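 /* One leaf_info per prefix length hangs off the leaf; each carries the
  * fib_alias list with one entry per route for that prefix.
  */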
Sasha Levinb67bfe02013-02-27 17:06:00 -08002320 hlist_for_each_entry_rcu(li, &l->list, hlist) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002321 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07002322 __be32 mask, prefix;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002323
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002324 mask = inet_make_mask(li->plen);
2325 prefix = htonl(l->key);
2326
2327 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
Herbert Xu1371e372005-10-15 09:42:39 +10002328 const struct fib_info *fi = fa->fa_info;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002329 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002330
2331 if (fa->fa_type == RTN_BROADCAST
2332 || fa->fa_type == RTN_MULTICAST)
2333 continue;
2334
Tetsuo Handa652586d2013-11-14 14:31:57 -08002335 seq_setwidth(seq, 127);
2336
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002337 if (fi)
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002338 seq_printf(seq,
2339 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
Tetsuo Handa652586d2013-11-14 14:31:57 -08002340 "%d\t%08X\t%d\t%u\t%u",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002341 fi->fib_dev ? fi->fib_dev->name : "*",
2342 prefix,
2343 fi->fib_nh->nh_gw, flags, 0, 0,
2344 fi->fib_priority,
2345 mask,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002346 (fi->fib_advmss ?
2347 fi->fib_advmss + 40 : 0),
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002348 fi->fib_window,
Tetsuo Handa652586d2013-11-14 14:31:57 -08002349 fi->fib_rtt >> 3);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002350 else
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002351 seq_printf(seq,
2352 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
Tetsuo Handa652586d2013-11-14 14:31:57 -08002353 "%d\t%08X\t%d\t%u\t%u",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002354 prefix, 0, flags, 0, 0, 0,
Tetsuo Handa652586d2013-11-14 14:31:57 -08002355 mask, 0, 0, 0);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002356
Tetsuo Handa652586d2013-11-14 14:31:57 -08002357 seq_pad(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002358 }
2359 }
2360
2361 return 0;
2362}
2363
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002364static const struct seq_operations fib_route_seq_ops = {
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002365 .start = fib_route_seq_start,
2366 .next = fib_route_seq_next,
2367 .stop = fib_route_seq_stop,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002368 .show = fib_route_seq_show,
2369};
2370
2371static int fib_route_seq_open(struct inode *inode, struct file *file)
2372{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002373 return seq_open_net(inode, file, &fib_route_seq_ops,
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002374 sizeof(struct fib_route_iter));
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002375}
2376
Arjan van de Ven9a321442007-02-12 00:55:35 -08002377static const struct file_operations fib_route_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002378 .owner = THIS_MODULE,
2379 .open = fib_route_seq_open,
2380 .read = seq_read,
2381 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002382 .release = seq_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002383};
2384
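/*
 * Create the per-namespace /proc/net entries: "fib_trie" (trie dump),
 * "fib_triestat" (trie statistics) and "route" (legacy routing table).
 */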
Denis V. Lunev61a02652008-01-10 03:21:09 -08002385int __net_init fib_proc_init(struct net *net)
Robert Olsson19baf832005-06-21 12:43:18 -07002386{
Gao fengd4beaa62013-02-18 01:34:54 +00002387 if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002388 goto out1;
2389
Gao fengd4beaa62013-02-18 01:34:54 +00002390 if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
2391 &fib_triestat_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002392 goto out2;
2393
Gao fengd4beaa62013-02-18 01:34:54 +00002394 if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002395 goto out3;
2396
Robert Olsson19baf832005-06-21 12:43:18 -07002397 return 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002398
2399out3:
Gao fengece31ff2013-02-18 01:34:56 +00002400 remove_proc_entry("fib_triestat", net->proc_net);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002401out2:
Gao fengece31ff2013-02-18 01:34:56 +00002402 remove_proc_entry("fib_trie", net->proc_net);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002403out1:
2404 return -ENOMEM;
Robert Olsson19baf832005-06-21 12:43:18 -07002405}
2406
Denis V. Lunev61a02652008-01-10 03:21:09 -08002407void __net_exit fib_proc_exit(struct net *net)
Robert Olsson19baf832005-06-21 12:43:18 -07002408{
Gao fengece31ff2013-02-18 01:34:56 +00002409 remove_proc_entry("fib_trie", net->proc_net);
2410 remove_proc_entry("fib_triestat", net->proc_net);
2411 remove_proc_entry("route", net->proc_net);
Robert Olsson19baf832005-06-21 12:43:18 -07002412}
2413
2414#endif /* CONFIG_PROC_FS */