/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work comes from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)

#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
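
/* Illustrative example of get_index() (values invented for this comment,
 * not taken from running code): for a tnode kv with kv->key = 0xC0A80000,
 * kv->pos = 13 and kv->bits = 3, looking up key = 0xC0A86464 gives
 * (key ^ kv->key) >> kv->pos = 0x6464 >> 13 = 3, i.e. child index 3, and
 * 3 >> kv->bits == 0 so the prefix matches. For key = 0xC0A96464 the same
 * expression yields 11, and 11 >> kv->bits != 0, which is how callers such
 * as fib_find_node() below detect a mismatch in the skipped bits.
 */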

struct tnode {
	t_key key;
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	struct tnode __rcu *parent;
	struct rcu_head rcu;
	union {
		/* The fields in this struct are valid if bits > 0 (TNODE) */
		struct {
			unsigned int full_children;  /* KEYLENGTH bits needed */
			unsigned int empty_children; /* KEYLENGTH bits needed */
			struct tnode __rcu *child[0];
		};
		/* This list pointer is valid if bits == 0 (LEAF) */
		struct hlist_head list;
	};
};

struct leaf_info {
	struct hlist_node hlist;
	int plen;
	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
	struct list_head falh;
	struct rcu_head rcu;
};

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct tnode __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static void resize(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct callback_head *tnode_free_head;
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/* caller must hold RTNL */
#define node_parent(n) rtnl_dereference((n)->parent)

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct tnode *n, struct tnode *tp)
{
	if (n)
		rcu_assign_pointer(n->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)

/* This provides us with the number of children in this node; in the case of a
 * leaf it will return 0, meaning none of the children are accessible.
 */
static inline unsigned long tnode_child_length(const struct tnode *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}
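
/* For illustration (example values only, not taken from running code):
 * a leaf has bits == 0, so (1ul << 0) & ~1ul == 0 and the helper reports no
 * accessible children; a tnode with bits == 4 yields (1ul << 4) & ~1ul == 16
 * child slots.
 */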

/* caller must hold RTNL */
static inline struct tnode *tnode_get_child(const struct tnode *tn,
					    unsigned long i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rtnl_dereference(tn->child[i]);
}

/* caller must hold RCU read lock or RTNL */
static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
						unsigned long i)
{
	BUG_ON(i >= tnode_child_length(tn));

	return rcu_dereference_rtnl(tn->child[i]);
}

/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * If n is an internal node - a 'tnode' here - the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *   31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *   15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
 * at this point.
 */

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

#define TNODE_KMALLOC_MAX \
	ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
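
/* Rough illustration (assuming 4 KiB pages and 8-byte pointers, which is an
 * assumption of this comment, not of the code): (PAGE_SIZE - sizeof(struct
 * tnode)) / sizeof(struct tnode *) is on the order of 500, so this evaluates
 * to 8; __node_free_rcu() below then kfree()s tnodes with bits <= 8 and
 * vfree()s larger ones, mirroring the kzalloc/vzalloc split in tnode_alloc().
 */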

static void __node_free_rcu(struct rcu_head *head)
{
	struct tnode *n = container_of(head, struct tnode, rcu);

	if (IS_LEAF(n))
		kmem_cache_free(trie_leaf_kmem, n);
	else if (n->bits <= TNODE_KMALLOC_MAX)
		kfree(n);
	else
		vfree(n);
}

#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)

static inline void free_leaf_info(struct leaf_info *leaf)
{
	kfree_rcu(leaf, rcu);
}

static struct tnode *tnode_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->rcu.next = tnode_free_head;
	tnode_free_head = &tn->rcu;
}

static void tnode_free_flush(void)
{
	struct callback_head *head;

	while ((head = tnode_free_head)) {
		struct tnode *tn = container_of(head, struct tnode, rcu);

		tnode_free_head = head->next;
		tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);

		node_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}

static struct tnode *leaf_new(t_key key)
{
	struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (l) {
		l->parent = NULL;
		/* set key and pos to reflect full key value
		 * any trailing zeros in the key should be ignored
		 * as the nodes are searched
		 */
		l->key = key;
		l->pos = 0;
		/* set bits to 0 indicating we are not a tnode */
		l->bits = 0;

		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		li->mask_plen = ntohl(inet_make_mask(plen));
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}

static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	size_t sz = offsetof(struct tnode, child[1 << bits]);
	struct tnode *tn = tnode_alloc(sz);
	unsigned int shift = pos + bits;

	/* verify that bits and pos are valid and their msb bits are clear */
	BUG_ON(!bits || (shift > KEYLENGTH));

	if (tn) {
		tn->parent = NULL;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
		 sizeof(struct tnode *) << bits);
	return tn;
}
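
/* Example of the key masking above (illustrative numbers only): for
 * tnode_new(0xC0A86464, 13, 3), shift is 16, so tn->key becomes
 * (0xC0A86464 >> 16) << 16 == 0xC0A80000 - everything below the child index
 * bits is zeroed, matching the tnode used in the get_index() example near
 * the top of this file.
 */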

/* Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
{
	return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}

/* Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
{
	struct tnode *chi = rtnl_dereference(tn->child[i]);
	int isfull, wasfull;

	BUG_ON(i >= tnode_child_length(tn));

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	wasfull = tnode_full(tn, chi);
	isfull = tnode_full(tn, n);

	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}

static void put_child_root(struct tnode *tp, struct trie *t,
			   t_key key, struct tnode *n)
{
	if (tp)
		put_child(tp, get_index(key, tp), n);
	else
		rcu_assign_pointer(t->trie, n);
}

static void tnode_clean_free(struct tnode *tn)
{
	struct tnode *tofree;
	unsigned long i;

	for (i = 0; i < tnode_child_length(tn); i++) {
		tofree = tnode_get_child(tn, i);
		if (tofree)
			node_free(tofree);
	}
	node_free(tn);
}

static int inflate(struct trie *t, struct tnode *oldtnode)
{
	unsigned long olen = tnode_child_length(oldtnode);
	struct tnode *tp = node_parent(oldtnode);
	struct tnode *tn;
	unsigned long i;
	t_key m;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
	if (!tn)
		return -ENOMEM;

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */
	for (i = 0, m = 1u << tn->pos; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);

		if (tnode_full(oldtnode, inode) && (inode->bits > 1)) {
			struct tnode *left, *right;

			left = tnode_new(inode->key & ~m, inode->pos,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key | m, inode->pos,
					  inode->bits - 1);

			if (!right) {
				node_free(left);
				goto nomem;
			}

			put_child(tn, 2*i, left);
			put_child(tn, 2*i+1, right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		unsigned long size, j;

		/* An empty child */
		if (inode == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
			put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		left = tnode_get_child(tn, 2*i);
		put_child(tn, 2*i, NULL);

		BUG_ON(!left);

		right = tnode_get_child(tn, 2*i+1);
		put_child(tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(left, j, rtnl_dereference(inode->child[j]));
			put_child(right, j, rtnl_dereference(inode->child[j + size]));
		}

		put_child(tn, 2 * i, left);
		put_child(tn, 2 * i + 1, right);

		tnode_free_safe(inode);

		resize(t, left);
		resize(t, right);
	}

	put_child_root(tp, t, tn->key, tn);
	tnode_free_safe(oldtnode);
	return 0;
nomem:
	tnode_clean_free(tn);
	return -ENOMEM;
}

static int halve(struct trie *t, struct tnode *oldtnode)
{
	unsigned long olen = tnode_child_length(oldtnode);
	struct tnode *tp = node_parent(oldtnode);
	struct tnode *tn, *left, *right;
	int i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
	if (!tn)
		return -ENOMEM;

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, oldtnode->pos, 1);

			if (!newn) {
				tnode_clean_free(tn);
				return -ENOMEM;
			}

			put_child(tn, i/2, newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = tnode_get_child(tn, i/2);
		put_child(newBinNode, 0, left);
		put_child(newBinNode, 1, right);

		put_child(tn, i / 2, newBinNode);

		resize(t, newBinNode);
	}

	put_child_root(tp, t, tn->key, tn);
	tnode_free_safe(oldtnode);

	return 0;
}

/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
 * the Helsinki University of Technology and Matti Tikkanen of Nokia
 * Telecommunications, page 6:
 * "A node is doubled if the ratio of non-empty children to all
 * children in the *doubled* node is at least 'high'."
 *
 * 'high' in this instance is the variable 'inflate_threshold'. It
 * is expressed as a percentage, so we multiply it with
 * tnode_child_length() and instead of multiplying by 2 (since the
 * child array will be doubled by inflate()) and multiplying
 * the left-hand side by 100 (to handle the percentage thing) we
 * multiply the left-hand side by 50.
 *
 * The left-hand side may look a bit weird: tnode_child_length(tn)
 * - tn->empty_children is of course the number of non-null children
 * in the current node. tn->full_children is the number of "full"
 * children, that is non-null tnodes with a skip value of 0.
 * All of those will be doubled in the resulting inflated tnode, so
 * we just count them one extra time here.
 *
 * A clearer way to write this would be:
 *
 * to_be_doubled = tn->full_children;
 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
 *     tn->full_children;
 *
 * new_child_length = tnode_child_length(tn) * 2;
 *
 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
 *      new_child_length;
 * if (new_fill_factor >= inflate_threshold)
 *
 * ...and so on, though it would mess up the while () loop.
 *
 * anyway,
 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
 *      inflate_threshold
 *
 * avoid a division:
 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
 *      inflate_threshold * new_child_length
 *
 * expand not_to_be_doubled and to_be_doubled, and shorten:
 * 100 * (tnode_child_length(tn) - tn->empty_children +
 *    tn->full_children) >= inflate_threshold * new_child_length
 *
 * expand new_child_length:
 * 100 * (tnode_child_length(tn) - tn->empty_children +
 *    tn->full_children) >=
 *      inflate_threshold * tnode_child_length(tn) * 2
 *
 * shorten again:
 * 50 * (tn->full_children + tnode_child_length(tn) -
 *    tn->empty_children) >= inflate_threshold *
 *    tnode_child_length(tn)
 *
 */
static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
{
	unsigned long used = tnode_child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= tp ? inflate_threshold : inflate_threshold_root;
	used += tn->full_children;
	used -= tn->empty_children;

	return tn->pos && ((50 * used) >= threshold);
}

static bool should_halve(const struct tnode *tp, const struct tnode *tn)
{
	unsigned long used = tnode_child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= tp ? halve_threshold : halve_threshold_root;
	used -= tn->empty_children;

	return (tn->bits > 1) && ((100 * used) < threshold);
}
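
/* Worked example of the two predicates above (numbers invented for this
 * comment): a non-root tnode with bits == 2 has tnode_child_length() == 4,
 * so threshold == 4 * inflate_threshold == 200; with one full child and one
 * empty child, used == 4 + 1 - 1 == 4 and 50 * 4 >= 200, so the node is
 * inflated (provided tn->pos != 0). For halving, a non-root tnode with
 * bits == 4 has 16 slots and threshold == 16 * halve_threshold == 400; if
 * 14 of them are empty, used == 2 and 100 * 2 < 400, so it is halved.
 */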

#define MAX_WORK 10
static void resize(struct trie *t, struct tnode *tn)
{
	struct tnode *tp = node_parent(tn), *n = NULL;
	struct tnode __rcu **cptr;
	int max_work;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* track the tnode via the pointer from the parent instead of
	 * doing it ourselves. This way we can let RCU fully do its
	 * thing without us interfering
	 */
	cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
	BUG_ON(tn != rtnl_dereference(*cptr));

	/* No children */
	if (tn->empty_children > (tnode_child_length(tn) - 1))
		goto no_children;

	/* One child */
	if (tn->empty_children == (tnode_child_length(tn) - 1))
		goto one_child;

	/* Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */
	max_work = MAX_WORK;
	while (should_inflate(tp, tn) && max_work--) {
		if (inflate(t, tn)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}

		tn = rtnl_dereference(*cptr);
	}

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return;

	/* Halve as long as the number of empty children in this
	 * node is above threshold.
	 */
	max_work = MAX_WORK;
	while (should_halve(tp, tn) && max_work--) {
		if (halve(t, tn)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}

		tn = rtnl_dereference(*cptr);
	}

	/* Only one child remains */
	if (tn->empty_children == (tnode_child_length(tn) - 1)) {
		unsigned long i;
one_child:
		for (i = tnode_child_length(tn); !n && i;)
			n = tnode_get_child(tn, --i);
no_children:
		/* compress one level */
		put_child_root(tp, t, tn->key, n);
		node_set_parent(n, tp);

		/* drop dead node */
		tnode_free_safe(tn);
	}
}

/* readside must use rcu_read_lock; currently the dump routines do so
 * via get_fa_head and dump */

static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct tnode *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_behind_rcu(&new->hlist, &last->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}

/* rcu_read_lock needs to be held by caller from readside */
static struct tnode *fib_find_node(struct trie *t, u32 key)
{
	struct tnode *n = rcu_dereference_rtnl(t->trie);

	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if !(index >> bits)
		 *   we know the value is cindex
		 * else
		 *   we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			return NULL;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			break;

		n = rcu_dereference_rtnl(n->child[index]);
	}

	return n;
}

static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	struct tnode *tp;

	while ((tp = node_parent(tn)) != NULL) {
		resize(t, tn);

		tnode_free_flush();
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		resize(t, tn);

	tnode_free_flush();
}

/* only used from updater-side */

static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	struct list_head *fa_head = NULL;
	struct tnode *l, *n, *tp = NULL;
	struct leaf_info *li;

	li = leaf_info_new(plen);
	if (!li)
		return NULL;
	fa_head = &li->falh;

	n = rtnl_dereference(t->trie);

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 *
	 * If we hit a node with a key that doesn't match then we should stop
	 * and create a new tnode to replace that node and insert ourselves
	 * and the other node into the new tnode.
	 */
	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 * if !(index >> bits)
		 *   we know the value is child index
		 * else
		 *   we have a mismatch in skip bits and failed
		 */
		if (index >> n->bits)
			break;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n)) {
			/* Case 1: n is a leaf, and prefixes match */
			insert_leaf_info(&n->list, li);
			return fa_head;
		}

		tp = n;
		n = rcu_dereference_rtnl(n->child[index]);
	}

	l = leaf_new(key);
	if (!l) {
		free_leaf_info(li);
		return NULL;
	}

	insert_leaf_info(&l->list, li);

	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 *  Add a new tnode here
	 *  the first tnode needs some special handling
	 *  leaves us in position for handling as case 3
	 */
	if (n) {
		struct tnode *tn;

		tn = tnode_new(key, __fls(key ^ n->key), 1);
		if (!tn) {
			free_leaf_info(li);
			node_free(l);
			return NULL;
		}

		/* initialize routes out of node */
		NODE_INIT_PARENT(tn, tp);
		put_child(tn, get_index(key, tn) ^ 1, n);

		/* start adding routes into the node */
		put_child_root(tp, t, key, tn);
		node_set_parent(n, tn);

		/* parent now has a NULL spot where the leaf can go */
		tp = tn;
	}

	/* Case 3: n is NULL, and will just insert a new leaf */
	if (tp) {
		NODE_INIT_PARENT(l, tp);
		put_child(tp, get_index(key, tp), l);
		trie_rebalance(t, tp);
	} else {
		rcu_assign_pointer(t->trie, l);
	}

	return fa_head;
}
996
Robert Olssond562f1f2007-03-26 14:22:22 -0700997/*
998 * Caller must hold RTNL.
999 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001000int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001001{
1002 struct trie *t = (struct trie *) tb->tb_data;
1003 struct fib_alias *fa, *new_fa;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001004 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001005 struct fib_info *fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001006 int plen = cfg->fc_dst_len;
1007 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001008 u32 key, mask;
1009 int err;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001010 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001011
1012 if (plen > 32)
1013 return -EINVAL;
1014
Thomas Graf4e902c52006-08-17 18:14:52 -07001015 key = ntohl(cfg->fc_dst);
Robert Olsson19baf832005-06-21 12:43:18 -07001016
Patrick McHardy2dfe55b2006-08-10 23:08:33 -07001017 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001018
Olof Johansson91b9a272005-08-09 20:24:39 -07001019 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001020
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001021 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001022 return -EINVAL;
1023
1024 key = key & mask;
1025
Thomas Graf4e902c52006-08-17 18:14:52 -07001026 fi = fib_create_info(cfg);
1027 if (IS_ERR(fi)) {
1028 err = PTR_ERR(fi);
Robert Olsson19baf832005-06-21 12:43:18 -07001029 goto err;
Thomas Graf4e902c52006-08-17 18:14:52 -07001030 }
Robert Olsson19baf832005-06-21 12:43:18 -07001031
1032 l = fib_find_node(t, key);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001033 fa = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001034
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001035 if (l) {
Robert Olsson19baf832005-06-21 12:43:18 -07001036 fa_head = get_fa_head(l, plen);
1037 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1038 }
1039
1040 /* Now fa, if non-NULL, points to the first fib alias
1041 * with the same keys [prefix,tos,priority], if such key already
1042 * exists or to the node before which we will insert new one.
1043 *
1044 * If fa is NULL, we will need to allocate a new one and
1045 * insert to the head of f.
1046 *
1047 * If f is NULL, no fib node matched the destination key
1048 * and we need to allocate a new one of those as well.
1049 */
1050
Julian Anastasov936f6f82008-01-28 21:18:06 -08001051 if (fa && fa->fa_tos == tos &&
1052 fa->fa_info->fib_priority == fi->fib_priority) {
1053 struct fib_alias *fa_first, *fa_match;
Robert Olsson19baf832005-06-21 12:43:18 -07001054
1055 err = -EEXIST;
Thomas Graf4e902c52006-08-17 18:14:52 -07001056 if (cfg->fc_nlflags & NLM_F_EXCL)
Robert Olsson19baf832005-06-21 12:43:18 -07001057 goto out;
1058
Julian Anastasov936f6f82008-01-28 21:18:06 -08001059 /* We have 2 goals:
1060 * 1. Find exact match for type, scope, fib_info to avoid
1061 * duplicate routes
1062 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1063 */
1064 fa_match = NULL;
1065 fa_first = fa;
1066 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1067 list_for_each_entry_continue(fa, fa_head, fa_list) {
1068 if (fa->fa_tos != tos)
1069 break;
1070 if (fa->fa_info->fib_priority != fi->fib_priority)
1071 break;
1072 if (fa->fa_type == cfg->fc_type &&
Julian Anastasov936f6f82008-01-28 21:18:06 -08001073 fa->fa_info == fi) {
1074 fa_match = fa;
1075 break;
1076 }
1077 }
1078
Thomas Graf4e902c52006-08-17 18:14:52 -07001079 if (cfg->fc_nlflags & NLM_F_REPLACE) {
Robert Olsson19baf832005-06-21 12:43:18 -07001080 struct fib_info *fi_drop;
1081 u8 state;
1082
Julian Anastasov936f6f82008-01-28 21:18:06 -08001083 fa = fa_first;
1084 if (fa_match) {
1085 if (fa == fa_match)
1086 err = 0;
Joonwoo Park67250332008-01-18 03:45:18 -08001087 goto out;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001088 }
Robert Olsson2373ce12005-08-25 13:01:29 -07001089 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001090 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -07001091 if (new_fa == NULL)
1092 goto out;
Robert Olsson19baf832005-06-21 12:43:18 -07001093
1094 fi_drop = fa->fa_info;
Robert Olsson2373ce12005-08-25 13:01:29 -07001095 new_fa->fa_tos = fa->fa_tos;
1096 new_fa->fa_info = fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001097 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001098 state = fa->fa_state;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001099 new_fa->fa_state = state & ~FA_S_ACCESSED;
Robert Olsson19baf832005-06-21 12:43:18 -07001100
Robert Olsson2373ce12005-08-25 13:01:29 -07001101 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1102 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001103
1104 fib_release_info(fi_drop);
1105 if (state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001106 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Milan Kocianb8f55832007-05-23 14:55:06 -07001107 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1108 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
Robert Olsson19baf832005-06-21 12:43:18 -07001109
Olof Johansson91b9a272005-08-09 20:24:39 -07001110 goto succeeded;
Robert Olsson19baf832005-06-21 12:43:18 -07001111 }
1112 /* Error if we find a perfect match which
1113 * uses the same scope, type, and nexthop
1114 * information.
1115 */
Julian Anastasov936f6f82008-01-28 21:18:06 -08001116 if (fa_match)
1117 goto out;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001118
Thomas Graf4e902c52006-08-17 18:14:52 -07001119 if (!(cfg->fc_nlflags & NLM_F_APPEND))
Julian Anastasov936f6f82008-01-28 21:18:06 -08001120 fa = fa_first;
Robert Olsson19baf832005-06-21 12:43:18 -07001121 }
1122 err = -ENOENT;
Thomas Graf4e902c52006-08-17 18:14:52 -07001123 if (!(cfg->fc_nlflags & NLM_F_CREATE))
Robert Olsson19baf832005-06-21 12:43:18 -07001124 goto out;
1125
1126 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001127 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson19baf832005-06-21 12:43:18 -07001128 if (new_fa == NULL)
1129 goto out;
1130
1131 new_fa->fa_info = fi;
1132 new_fa->fa_tos = tos;
Thomas Graf4e902c52006-08-17 18:14:52 -07001133 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001134 new_fa->fa_state = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001135 /*
1136 * Insert the new entry into the list.
1137 */
1138
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001139 if (!fa_head) {
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001140 fa_head = fib_insert_node(t, key, plen);
1141 if (unlikely(!fa_head)) {
1142 err = -ENOMEM;
Robert Olssonf835e472005-06-28 15:00:39 -07001143 goto out_free_new_fa;
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001144 }
Robert Olssonf835e472005-06-28 15:00:39 -07001145 }
Robert Olsson19baf832005-06-21 12:43:18 -07001146
David S. Miller21d8c492011-04-14 14:49:37 -07001147 if (!plen)
1148 tb->tb_num_default++;
1149
Robert Olsson2373ce12005-08-25 13:01:29 -07001150 list_add_tail_rcu(&new_fa->fa_list,
1151 (fa ? &fa->fa_list : fa_head));
Robert Olsson19baf832005-06-21 12:43:18 -07001152
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001153 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Thomas Graf4e902c52006-08-17 18:14:52 -07001154 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001155 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001156succeeded:
1157 return 0;
Robert Olssonf835e472005-06-28 15:00:39 -07001158
1159out_free_new_fa:
1160 kmem_cache_free(fn_alias_kmem, new_fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001161out:
1162 fib_release_info(fi);
Olof Johansson91b9a272005-08-09 20:24:39 -07001163err:
Robert Olsson19baf832005-06-21 12:43:18 -07001164 return err;
1165}
1166
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001167static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1168{
1169 t_key prefix = n->key;
1170
1171 return (key ^ prefix) & (prefix | -prefix);
1172}
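/* For a non-zero node key, (prefix | -prefix) is a mask of every bit from the
 * key's least significant set bit upwards, so the result is zero only when the
 * lookup key agrees with the node's key on all of those bits.  A minimal
 * userspace sketch of the same bit trick (illustrative only, not kernel code;
 * the addresses below are made-up examples):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint32_t mismatch(uint32_t key, uint32_t prefix)
 *	{
 *		return (key ^ prefix) & (prefix | -prefix);
 *	}
 *
 *	int main(void)
 *	{
 *		uint32_t prefix = 0xc0a80100;	// 192.168.1.0, lowest set bit 8,
 *						// so prefix | -prefix == 0xffffff00
 *		printf("%x\n", (unsigned)mismatch(0xc0a80105, prefix));	// 0     -> match
 *		printf("%x\n", (unsigned)mismatch(0xc0a80205, prefix));	// 0x300 -> mismatch
 *		return 0;
 *	}
 */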
1173
Alexander Duyck345e9b52014-12-31 10:56:24 -08001174/* should be called with rcu_read_lock */
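/* On a successful match the fib_props[] error for the route type is returned
 * (0 for ordinary unicast/local routes, a negative errno for rejecting types
 * such as blackhole/unreachable/prohibit); -EAGAIN is returned when the trie
 * is empty or no prefix matches at all.
 */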
David S. Miller22bd5b92011-03-11 19:54:08 -05001175int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001176 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001177{
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001178 struct trie *t = (struct trie *)tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001179#ifdef CONFIG_IP_FIB_TRIE_STATS
1180 struct trie_use_stats __percpu *stats = t->stats;
1181#endif
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001182 const t_key key = ntohl(flp->daddr);
1183 struct tnode *n, *pn;
Alexander Duyck345e9b52014-12-31 10:56:24 -08001184 struct leaf_info *li;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001185 t_key cindex;
Robert Olsson19baf832005-06-21 12:43:18 -07001186
Robert Olsson2373ce12005-08-25 13:01:29 -07001187 n = rcu_dereference(t->trie);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001188 if (!n)
Alexander Duyck345e9b52014-12-31 10:56:24 -08001189 return -EAGAIN;
Robert Olsson19baf832005-06-21 12:43:18 -07001190
1191#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001192 this_cpu_inc(stats->gets);
Robert Olsson19baf832005-06-21 12:43:18 -07001193#endif
1194
Alexander Duyckadaf9812014-12-31 10:55:47 -08001195 pn = n;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001196 cindex = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001197
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001198 /* Step 1: Travel to the longest prefix match in the trie */
1199 for (;;) {
1200 unsigned long index = get_index(key, n);
Robert Olsson19baf832005-06-21 12:43:18 -07001201
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001202 /* This bit of code is a bit tricky but it combines multiple
1203 * checks into a single check. The prefix consists of the
1204 * prefix plus zeros for the "bits" in the prefix. The index
1205 * is the difference between the key and this value. From
1206 * this we can actually derive several pieces of data.
1207 * if !(index >> bits)
1208 * we know the value is child index
1209 * else
1210 * we have a mismatch in skip bits and failed
1211 */
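		/* Worked example (assuming get_index() is the
		 * "(key ^ n->key) >> n->pos" helper defined earlier in this
		 * file): with n->pos == 8 and n->bits == 2 the node indexes
		 * key bits 8-9.  A key that agrees with n->key on all bits
		 * above bit 9 yields an index of 0..3, so index >> n->bits
		 * is 0; any disagreement in those higher (skipped) bits sets
		 * a bit at position 2 or above and the test below stops the
		 * descent at this node.
		 */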
1212 if (index >> n->bits)
1213 break;
Robert Olsson19baf832005-06-21 12:43:18 -07001214
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001215 /* we have found a leaf. Prefixes have already been compared */
1216 if (IS_LEAF(n))
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001217 goto found;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001218
1219 /* only record pn and cindex if we are going to be chopping
1220 * bits later. Otherwise we are just wasting cycles.
1221 */
1222 if (index) {
1223 pn = n;
1224 cindex = index;
Olof Johansson91b9a272005-08-09 20:24:39 -07001225 }
1226
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001227 n = rcu_dereference(n->child[index]);
1228 if (unlikely(!n))
Robert Olsson19baf832005-06-21 12:43:18 -07001229 goto backtrace;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001230 }
1231
1232 /* Step 2: Sort out leaves and begin backtracking for longest prefix */
1233 for (;;) {
1234 /* record the pointer where our next node pointer is stored */
1235 struct tnode __rcu **cptr = n->child;
1236
1237 /* This test verifies that none of the bits that differ
1238 * between the key and the prefix exist in the region of
1239 * the lsb and higher in the prefix.
1240 */
1241 if (unlikely(prefix_mismatch(key, n)))
1242 goto backtrace;
1243
1244 /* exit out and process leaf */
1245 if (unlikely(IS_LEAF(n)))
1246 break;
1247
1248 /* Don't bother recording parent info. Since we are in
1249 * prefix match mode we will have to come back to wherever
1250 * we started this traversal anyway
1251 */
1252
1253 while ((n = rcu_dereference(*cptr)) == NULL) {
1254backtrace:
1255#ifdef CONFIG_IP_FIB_TRIE_STATS
1256 if (!n)
1257 this_cpu_inc(stats->null_node_hit);
1258#endif
1259 /* If we are at cindex 0 there are no more bits for
1260 * us to strip at this level so we must ascend back
1261 * up one level to see if there are any more bits to
1262 * be stripped there.
1263 */
1264 while (!cindex) {
1265 t_key pkey = pn->key;
1266
1267 pn = node_parent_rcu(pn);
1268 if (unlikely(!pn))
Alexander Duyck345e9b52014-12-31 10:56:24 -08001269 return -EAGAIN;
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001270#ifdef CONFIG_IP_FIB_TRIE_STATS
1271 this_cpu_inc(stats->backtrack);
1272#endif
1273 /* Get Child's index */
1274 cindex = get_index(pkey, pn);
1275 }
1276
1277 /* strip the least significant bit from the cindex */
1278 cindex &= cindex - 1;
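			/* Clearing the lowest set bit walks the remaining
			 * candidate slots, e.g. cindex 0b0110 becomes 0b0100
			 * and then 0b0000; once it hits zero the
			 * while (!cindex) loop above ascends to the parent.
			 */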
1279
1280 /* grab pointer for next child node */
1281 cptr = &pn->child[cindex];
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001282 }
Robert Olsson19baf832005-06-21 12:43:18 -07001283 }
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001284
Robert Olsson19baf832005-06-21 12:43:18 -07001285found:
Alexander Duyck9f9e6362014-12-31 10:55:54 -08001286 /* Step 3: Process the leaf; if that fails, fall back to backtracking */
Alexander Duyck345e9b52014-12-31 10:56:24 -08001287 hlist_for_each_entry_rcu(li, &n->list, hlist) {
1288 struct fib_alias *fa;
1289
1290 if ((key ^ n->key) & li->mask_plen)
1291 continue;
1292
1293 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1294 struct fib_info *fi = fa->fa_info;
1295 int nhsel, err;
1296
1297 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1298 continue;
1299 if (fi->fib_dead)
1300 continue;
1301 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1302 continue;
1303 fib_alias_accessed(fa);
1304 err = fib_props[fa->fa_type].error;
1305 if (unlikely(err < 0)) {
1306#ifdef CONFIG_IP_FIB_TRIE_STATS
1307 this_cpu_inc(stats->semantic_match_passed);
1308#endif
1309 return err;
1310 }
1311 if (fi->fib_flags & RTNH_F_DEAD)
1312 continue;
1313 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1314 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1315
1316 if (nh->nh_flags & RTNH_F_DEAD)
1317 continue;
1318 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
1319 continue;
1320
1321 if (!(fib_flags & FIB_LOOKUP_NOREF))
1322 atomic_inc(&fi->fib_clntref);
1323
1324 res->prefixlen = li->plen;
1325 res->nh_sel = nhsel;
1326 res->type = fa->fa_type;
1327 res->scope = fi->fib_scope;
1328 res->fi = fi;
1329 res->table = tb;
1330 res->fa_head = &li->falh;
1331#ifdef CONFIG_IP_FIB_TRIE_STATS
1332 this_cpu_inc(stats->semantic_match_passed);
1333#endif
1334 return err;
1335 }
1336 }
1337
1338#ifdef CONFIG_IP_FIB_TRIE_STATS
1339 this_cpu_inc(stats->semantic_match_miss);
1340#endif
1341 }
1342 goto backtrace;
Robert Olsson19baf832005-06-21 12:43:18 -07001343}
Florian Westphal6fc01432011-08-25 13:46:12 +02001344EXPORT_SYMBOL_GPL(fib_table_lookup);
Robert Olsson19baf832005-06-21 12:43:18 -07001345
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001346/*
1347 * Remove the leaf and rebalance its former parent.
1348 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001349static void trie_leaf_remove(struct trie *t, struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001350{
Alexander Duyck64c9b6f2014-12-31 10:55:35 -08001351 struct tnode *tp = node_parent(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001352
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001353 pr_debug("entering trie_leaf_remove(%p)\n", l);
Robert Olsson19baf832005-06-21 12:43:18 -07001354
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001355 if (tp) {
Alexander Duyck836a0122014-12-31 10:56:06 -08001356 put_child(tp, get_index(l->key, tp), NULL);
Jarek Poplawski7b855762009-06-18 00:28:51 -07001357 trie_rebalance(t, tp);
Alexander Duyck836a0122014-12-31 10:56:06 -08001358 } else {
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001359 RCU_INIT_POINTER(t->trie, NULL);
Alexander Duyck836a0122014-12-31 10:56:06 -08001360 }
Robert Olsson19baf832005-06-21 12:43:18 -07001361
Alexander Duyck37fd30f2014-12-31 10:55:41 -08001362 node_free(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001363}
1364
Robert Olssond562f1f2007-03-26 14:22:22 -07001365/*
1366 * Caller must hold RTNL.
1367 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001368int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001369{
1370 struct trie *t = (struct trie *) tb->tb_data;
1371 u32 key, mask;
Thomas Graf4e902c52006-08-17 18:14:52 -07001372 int plen = cfg->fc_dst_len;
1373 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001374 struct fib_alias *fa, *fa_to_delete;
1375 struct list_head *fa_head;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001376 struct tnode *l;
Olof Johansson91b9a272005-08-09 20:24:39 -07001377 struct leaf_info *li;
1378
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001379 if (plen > 32)
Robert Olsson19baf832005-06-21 12:43:18 -07001380 return -EINVAL;
1381
Thomas Graf4e902c52006-08-17 18:14:52 -07001382 key = ntohl(cfg->fc_dst);
Olof Johansson91b9a272005-08-09 20:24:39 -07001383 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001384
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001385 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001386 return -EINVAL;
1387
1388 key = key & mask;
1389 l = fib_find_node(t, key);
1390
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001391 if (!l)
Robert Olsson19baf832005-06-21 12:43:18 -07001392 return -ESRCH;
1393
Igor Maravicad5b3102012-08-13 10:26:08 +02001394 li = find_leaf_info(l, plen);
1395
1396 if (!li)
1397 return -ESRCH;
1398
1399 fa_head = &li->falh;
Robert Olsson19baf832005-06-21 12:43:18 -07001400 fa = fib_find_alias(fa_head, tos, 0);
1401
1402 if (!fa)
1403 return -ESRCH;
1404
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001405 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
Robert Olsson19baf832005-06-21 12:43:18 -07001406
1407 fa_to_delete = NULL;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001408 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1409 list_for_each_entry_continue(fa, fa_head, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001410 struct fib_info *fi = fa->fa_info;
1411
1412 if (fa->fa_tos != tos)
1413 break;
1414
Thomas Graf4e902c52006-08-17 18:14:52 -07001415 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1416 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
David S. Miller37e826c2011-03-24 18:06:47 -07001417 fa->fa_info->fib_scope == cfg->fc_scope) &&
Julian Anastasov74cb3c12011-03-19 12:13:46 +00001418 (!cfg->fc_prefsrc ||
1419 fi->fib_prefsrc == cfg->fc_prefsrc) &&
Thomas Graf4e902c52006-08-17 18:14:52 -07001420 (!cfg->fc_protocol ||
1421 fi->fib_protocol == cfg->fc_protocol) &&
1422 fib_nh_match(cfg, fi) == 0) {
Robert Olsson19baf832005-06-21 12:43:18 -07001423 fa_to_delete = fa;
1424 break;
1425 }
1426 }
1427
Olof Johansson91b9a272005-08-09 20:24:39 -07001428 if (!fa_to_delete)
1429 return -ESRCH;
Robert Olsson19baf832005-06-21 12:43:18 -07001430
Olof Johansson91b9a272005-08-09 20:24:39 -07001431 fa = fa_to_delete;
Thomas Graf4e902c52006-08-17 18:14:52 -07001432 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001433 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001434
Robert Olsson2373ce12005-08-25 13:01:29 -07001435 list_del_rcu(&fa->fa_list);
Robert Olsson19baf832005-06-21 12:43:18 -07001436
David S. Miller21d8c492011-04-14 14:49:37 -07001437 if (!plen)
1438 tb->tb_num_default--;
1439
Olof Johansson91b9a272005-08-09 20:24:39 -07001440 if (list_empty(fa_head)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001441 hlist_del_rcu(&li->hlist);
Olof Johansson91b9a272005-08-09 20:24:39 -07001442 free_leaf_info(li);
Robert Olsson2373ce12005-08-25 13:01:29 -07001443 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001444
1445 if (hlist_empty(&l->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001446 trie_leaf_remove(t, l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001447
1448 if (fa->fa_state & FA_S_ACCESSED)
Nicolas Dichtel4ccfe6d2012-09-07 00:45:29 +00001449 rt_cache_flush(cfg->fc_nlinfo.nl_net);
Olof Johansson91b9a272005-08-09 20:24:39 -07001450
Robert Olsson2373ce12005-08-25 13:01:29 -07001451 fib_release_info(fa->fa_info);
1452 alias_free_mem_rcu(fa);
Olof Johansson91b9a272005-08-09 20:24:39 -07001453 return 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001454}
1455
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001456static int trie_flush_list(struct list_head *head)
Robert Olsson19baf832005-06-21 12:43:18 -07001457{
1458 struct fib_alias *fa, *fa_node;
1459 int found = 0;
1460
1461 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1462 struct fib_info *fi = fa->fa_info;
Robert Olsson19baf832005-06-21 12:43:18 -07001463
Robert Olsson2373ce12005-08-25 13:01:29 -07001464 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1465 list_del_rcu(&fa->fa_list);
1466 fib_release_info(fa->fa_info);
1467 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001468 found++;
1469 }
1470 }
1471 return found;
1472}
1473
Alexander Duyckadaf9812014-12-31 10:55:47 -08001474static int trie_flush_leaf(struct tnode *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001475{
1476 int found = 0;
1477 struct hlist_head *lih = &l->list;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001478 struct hlist_node *tmp;
Robert Olsson19baf832005-06-21 12:43:18 -07001479 struct leaf_info *li = NULL;
1480
Sasha Levinb67bfe02013-02-27 17:06:00 -08001481 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001482 found += trie_flush_list(&li->falh);
Robert Olsson19baf832005-06-21 12:43:18 -07001483
1484 if (list_empty(&li->falh)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001485 hlist_del_rcu(&li->hlist);
Robert Olsson19baf832005-06-21 12:43:18 -07001486 free_leaf_info(li);
1487 }
1488 }
1489 return found;
1490}
1491
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001492/*
1493 * Scan for the next leaf to the right of child c in parent p
1494 * (the first leaf when c is NULL).  With back pointers, no recursion is necessary.
1495 */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001496static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
Robert Olsson19baf832005-06-21 12:43:18 -07001497{
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001498 do {
Alexander Duyck98293e82014-12-31 10:56:18 -08001499 unsigned long idx = c ? get_index(c->key, p) + 1 : 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001500
Alexander Duyck98293e82014-12-31 10:56:18 -08001501 while (idx < tnode_child_length(p)) {
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001502 c = tnode_get_child_rcu(p, idx++);
Robert Olsson2373ce12005-08-25 13:01:29 -07001503 if (!c)
Olof Johansson91b9a272005-08-09 20:24:39 -07001504 continue;
Robert Olsson19baf832005-06-21 12:43:18 -07001505
Eric Dumazetaab515d2013-08-05 11:18:49 -07001506 if (IS_LEAF(c))
Alexander Duyckadaf9812014-12-31 10:55:47 -08001507 return c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001508
1509 /* Restart scanning in the new node */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001510 p = c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001511 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001512 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001513
1514 /* Node empty, walk back up to parent */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001515 c = p;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001516 } while ((p = node_parent_rcu(c)) != NULL);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001517
1518 return NULL; /* Root of trie */
1519}
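/* Illustrative walk (hypothetical trie): if the root tnode P has
 * child[0] = leaf A and child[2] = tnode T holding leaves B and C, then
 * leaf_walk_rcu(P, NULL) returns A, leaf_walk_rcu(P, A) descends into T and
 * returns B, leaf_walk_rcu(T, B) returns C, and leaf_walk_rcu(T, C) climbs
 * back past the root and returns NULL.
 */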
1520
Alexander Duyckadaf9812014-12-31 10:55:47 -08001521static struct tnode *trie_firstleaf(struct trie *t)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001522{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001523 struct tnode *n = rcu_dereference_rtnl(t->trie);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001524
1525 if (!n)
1526 return NULL;
1527
1528 if (IS_LEAF(n)) /* trie is just a leaf */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001529 return n;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001530
1531 return leaf_walk_rcu(n, NULL);
1532}
1533
Alexander Duyckadaf9812014-12-31 10:55:47 -08001534static struct tnode *trie_nextleaf(struct tnode *l)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001535{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001536 struct tnode *p = node_parent_rcu(l);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001537
1538 if (!p)
1539 return NULL; /* trie with just one leaf */
1540
Alexander Duyckadaf9812014-12-31 10:55:47 -08001541 return leaf_walk_rcu(p, l);
Robert Olsson19baf832005-06-21 12:43:18 -07001542}
1543
Alexander Duyckadaf9812014-12-31 10:55:47 -08001544static struct tnode *trie_leafindex(struct trie *t, int index)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001545{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001546 struct tnode *l = trie_firstleaf(t);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001547
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001548 while (l && index-- > 0)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001549 l = trie_nextleaf(l);
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001550
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001551 return l;
1552}
1553
1554
Robert Olssond562f1f2007-03-26 14:22:22 -07001555/*
1556 * Caller must hold RTNL.
1557 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001558int fib_table_flush(struct fib_table *tb)
Robert Olsson19baf832005-06-21 12:43:18 -07001559{
1560 struct trie *t = (struct trie *) tb->tb_data;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001561 struct tnode *l, *ll = NULL;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001562 int found = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001563
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001564 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001565 found += trie_flush_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001566
1567 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001568 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001569 ll = l;
1570 }
1571
1572 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001573 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001574
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001575 pr_debug("trie_flush found=%d\n", found);
Robert Olsson19baf832005-06-21 12:43:18 -07001576 return found;
1577}
1578
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001579void fib_free_table(struct fib_table *tb)
1580{
Alexander Duyck8274a972014-12-31 10:55:29 -08001581#ifdef CONFIG_IP_FIB_TRIE_STATS
1582 struct trie *t = (struct trie *)tb->tb_data;
1583
1584 free_percpu(t->stats);
1585#endif /* CONFIG_IP_FIB_TRIE_STATS */
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001586 kfree(tb);
1587}
1588
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001589static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1590 struct fib_table *tb,
Robert Olsson19baf832005-06-21 12:43:18 -07001591 struct sk_buff *skb, struct netlink_callback *cb)
1592{
1593 int i, s_i;
1594 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07001595 __be32 xkey = htonl(key);
Robert Olsson19baf832005-06-21 12:43:18 -07001596
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001597 s_i = cb->args[5];
Robert Olsson19baf832005-06-21 12:43:18 -07001598 i = 0;
1599
Robert Olsson2373ce12005-08-25 13:01:29 -07001600 /* rcu_read_lock is hold by caller */
1601
1602 list_for_each_entry_rcu(fa, fah, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001603 if (i < s_i) {
1604 i++;
1605 continue;
1606 }
Robert Olsson19baf832005-06-21 12:43:18 -07001607
Eric W. Biederman15e47302012-09-07 20:12:54 +00001608 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
Robert Olsson19baf832005-06-21 12:43:18 -07001609 cb->nlh->nlmsg_seq,
1610 RTM_NEWROUTE,
1611 tb->tb_id,
1612 fa->fa_type,
Thomas Grafbe403ea2006-08-17 18:15:17 -07001613 xkey,
Robert Olsson19baf832005-06-21 12:43:18 -07001614 plen,
1615 fa->fa_tos,
Stephen Hemminger64347f72008-01-22 21:55:01 -08001616 fa->fa_info, NLM_F_MULTI) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001617 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001618 return -1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001619 }
Robert Olsson19baf832005-06-21 12:43:18 -07001620 i++;
1621 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001622 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001623 return skb->len;
1624}
1625
Alexander Duyckadaf9812014-12-31 10:55:47 -08001626static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001627 struct sk_buff *skb, struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001628{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001629 struct leaf_info *li;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001630 int i, s_i;
Robert Olsson19baf832005-06-21 12:43:18 -07001631
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001632 s_i = cb->args[4];
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001633 i = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001634
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001635 /* rcu_read_lock is held by caller */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001636 hlist_for_each_entry_rcu(li, &l->list, hlist) {
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001637 if (i < s_i) {
1638 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001639 continue;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001640 }
Robert Olsson19baf832005-06-21 12:43:18 -07001641
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001642 if (i > s_i)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001643 cb->args[5] = 0;
Olof Johansson91b9a272005-08-09 20:24:39 -07001644
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001645 if (list_empty(&li->falh))
Robert Olsson19baf832005-06-21 12:43:18 -07001646 continue;
1647
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001648 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001649 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001650 return -1;
1651 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001652 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001653 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001654
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001655 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001656 return skb->len;
1657}
1658
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001659int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1660 struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001661{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001662 struct tnode *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001663 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001664 t_key key = cb->args[2];
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001665 int count = cb->args[3];
Robert Olsson19baf832005-06-21 12:43:18 -07001666
Robert Olsson2373ce12005-08-25 13:01:29 -07001667 rcu_read_lock();
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001668 /* Dump starting at last key.
1669 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1670 */
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001671 if (count == 0)
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001672 l = trie_firstleaf(t);
1673 else {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001674 /* Normally, continue from the last key, but if that is missing,
1675 * fall back to a slow rescan.
1676 */
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001677 l = fib_find_node(t, key);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001678 if (!l)
1679 l = trie_leafindex(t, count);
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001680 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001681
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001682 while (l) {
1683 cb->args[2] = l->key;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001684 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001685 cb->args[3] = count;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001686 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001687 return -1;
Robert Olsson19baf832005-06-21 12:43:18 -07001688 }
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001689
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001690 ++count;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001691 l = trie_nextleaf(l);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001692 memset(&cb->args[4], 0,
1693 sizeof(cb->args) - 4*sizeof(cb->args[0]));
Robert Olsson19baf832005-06-21 12:43:18 -07001694 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001695 cb->args[3] = count;
Robert Olsson2373ce12005-08-25 13:01:29 -07001696 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001697
Robert Olsson19baf832005-06-21 12:43:18 -07001698 return skb->len;
Robert Olsson19baf832005-06-21 12:43:18 -07001699}
1700
David S. Miller5348ba82011-02-01 15:30:56 -08001701void __init fib_trie_init(void)
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001702{
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001703 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1704 sizeof(struct fib_alias),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001705 0, SLAB_PANIC, NULL);
1706
1707 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001708 max(sizeof(struct tnode),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001709 sizeof(struct leaf_info)),
1710 0, SLAB_PANIC, NULL);
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001711}
Robert Olsson19baf832005-06-21 12:43:18 -07001712
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001713
David S. Miller5348ba82011-02-01 15:30:56 -08001714struct fib_table *fib_trie_table(u32 id)
Robert Olsson19baf832005-06-21 12:43:18 -07001715{
1716 struct fib_table *tb;
1717 struct trie *t;
1718
Robert Olsson19baf832005-06-21 12:43:18 -07001719 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1720 GFP_KERNEL);
1721 if (tb == NULL)
1722 return NULL;
1723
1724 tb->tb_id = id;
Denis V. Lunev971b8932007-12-08 00:32:23 -08001725 tb->tb_default = -1;
David S. Miller21d8c492011-04-14 14:49:37 -07001726 tb->tb_num_default = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001727
1728 t = (struct trie *) tb->tb_data;
Alexander Duyck8274a972014-12-31 10:55:29 -08001729 RCU_INIT_POINTER(t->trie, NULL);
1730#ifdef CONFIG_IP_FIB_TRIE_STATS
1731 t->stats = alloc_percpu(struct trie_use_stats);
1732 if (!t->stats) {
1733 kfree(tb);
1734 tb = NULL;
1735 }
1736#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001737
Robert Olsson19baf832005-06-21 12:43:18 -07001738 return tb;
1739}
1740
Robert Olsson19baf832005-06-21 12:43:18 -07001741#ifdef CONFIG_PROC_FS
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001742/* Depth first Trie walk iterator */
1743struct fib_trie_iter {
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001744 struct seq_net_private p;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001745 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001746 struct tnode *tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001747 unsigned int index;
1748 unsigned int depth;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001749};
Robert Olsson19baf832005-06-21 12:43:18 -07001750
Alexander Duyckadaf9812014-12-31 10:55:47 -08001751static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
Robert Olsson19baf832005-06-21 12:43:18 -07001752{
Alexander Duyck98293e82014-12-31 10:56:18 -08001753 unsigned long cindex = iter->index;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001754 struct tnode *tn = iter->tnode;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001755 struct tnode *p;
1756
Eric W. Biederman6640e692007-01-24 14:42:04 -08001757 /* A single entry routing table */
1758 if (!tn)
1759 return NULL;
1760
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001761 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1762 iter->tnode, iter->index, iter->depth);
1763rescan:
Alexander Duyck98293e82014-12-31 10:56:18 -08001764 while (cindex < tnode_child_length(tn)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001765 struct tnode *n = tnode_get_child_rcu(tn, cindex);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001766
1767 if (n) {
1768 if (IS_LEAF(n)) {
1769 iter->tnode = tn;
1770 iter->index = cindex + 1;
1771 } else {
1772 /* push down one level */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001773 iter->tnode = n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001774 iter->index = 0;
1775 ++iter->depth;
1776 }
1777 return n;
1778 }
1779
1780 ++cindex;
1781 }
1782
1783 /* Current node exhausted, pop back up */
Alexander Duyckadaf9812014-12-31 10:55:47 -08001784 p = node_parent_rcu(tn);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001785 if (p) {
Alexander Duycke9b44012014-12-31 10:56:12 -08001786 cindex = get_index(tn->key, p) + 1;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001787 tn = p;
1788 --iter->depth;
1789 goto rescan;
1790 }
1791
1792 /* got root? */
Robert Olsson19baf832005-06-21 12:43:18 -07001793 return NULL;
1794}
1795
Alexander Duyckadaf9812014-12-31 10:55:47 -08001796static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001797 struct trie *t)
Robert Olsson19baf832005-06-21 12:43:18 -07001798{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001799 struct tnode *n;
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001800
Stephen Hemminger132adf52007-03-08 20:44:43 -08001801 if (!t)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001802 return NULL;
1803
1804 n = rcu_dereference(t->trie);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001805 if (!n)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08001806 return NULL;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001807
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001808 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001809 iter->tnode = n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001810 iter->index = 0;
1811 iter->depth = 1;
1812 } else {
1813 iter->tnode = NULL;
1814 iter->index = 0;
1815 iter->depth = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001816 }
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001817
1818 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07001819}
1820
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001821static void trie_collect_stats(struct trie *t, struct trie_stat *s)
Robert Olsson19baf832005-06-21 12:43:18 -07001822{
Alexander Duyckadaf9812014-12-31 10:55:47 -08001823 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001824 struct fib_trie_iter iter;
Robert Olsson19baf832005-06-21 12:43:18 -07001825
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001826 memset(s, 0, sizeof(*s));
Robert Olsson19baf832005-06-21 12:43:18 -07001827
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001828 rcu_read_lock();
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001829 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001830 if (IS_LEAF(n)) {
Stephen Hemminger93672292008-01-22 21:54:05 -08001831 struct leaf_info *li;
Stephen Hemminger93672292008-01-22 21:54:05 -08001832
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001833 s->leaves++;
1834 s->totdepth += iter.depth;
1835 if (iter.depth > s->maxdepth)
1836 s->maxdepth = iter.depth;
Stephen Hemminger93672292008-01-22 21:54:05 -08001837
Alexander Duyckadaf9812014-12-31 10:55:47 -08001838 hlist_for_each_entry_rcu(li, &n->list, hlist)
Stephen Hemminger93672292008-01-22 21:54:05 -08001839 ++s->prefixes;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001840 } else {
Alexander Duyck98293e82014-12-31 10:56:18 -08001841 unsigned long i;
Robert Olsson19baf832005-06-21 12:43:18 -07001842
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001843 s->tnodes++;
Alexander Duyckadaf9812014-12-31 10:55:47 -08001844 if (n->bits < MAX_STAT_DEPTH)
1845 s->nodesizes[n->bits]++;
Robert Olsson06ef9212006-03-20 21:35:01 -08001846
Alexander Duyck98293e82014-12-31 10:56:18 -08001847 for (i = 0; i < tnode_child_length(n); i++) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001848 if (!rcu_access_pointer(n->child[i]))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001849 s->nullpointers++;
Alexander Duyck98293e82014-12-31 10:56:18 -08001850 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001851 }
1852 }
1853 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001854}
1855
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001856/*
Robert Olsson19baf832005-06-21 12:43:18 -07001857 * This outputs /proc/net/fib_triestat
Robert Olsson19baf832005-06-21 12:43:18 -07001858 */
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001859static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
Robert Olsson19baf832005-06-21 12:43:18 -07001860{
Eric Dumazeta034ee32010-09-09 23:32:28 +00001861 unsigned int i, max, pointers, bytes, avdepth;
Robert Olsson19baf832005-06-21 12:43:18 -07001862
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001863 if (stat->leaves)
1864 avdepth = stat->totdepth*100 / stat->leaves;
1865 else
1866 avdepth = 0;
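	/* avdepth is the average leaf depth scaled by 100 so that two
	 * decimal places can be printed below without floating point.
	 */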
Robert Olsson19baf832005-06-21 12:43:18 -07001867
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001868 seq_printf(seq, "\tAver depth: %u.%02d\n",
1869 avdepth / 100, avdepth % 100);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001870 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
Robert Olsson19baf832005-06-21 12:43:18 -07001871
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001872 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
Alexander Duyckadaf9812014-12-31 10:55:47 -08001873 bytes = sizeof(struct tnode) * stat->leaves;
Stephen Hemminger93672292008-01-22 21:54:05 -08001874
1875 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
1876 bytes += sizeof(struct leaf_info) * stat->prefixes;
1877
Stephen Hemminger187b5182008-01-12 20:55:55 -08001878 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001879 bytes += sizeof(struct tnode) * stat->tnodes;
Robert Olsson19baf832005-06-21 12:43:18 -07001880
Robert Olsson06ef9212006-03-20 21:35:01 -08001881 max = MAX_STAT_DEPTH;
1882 while (max > 0 && stat->nodesizes[max-1] == 0)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001883 max--;
Robert Olsson19baf832005-06-21 12:43:18 -07001884
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001885 pointers = 0;
Jerry Snitselaarf585a992013-07-22 12:01:58 -07001886 for (i = 1; i < max; i++)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001887 if (stat->nodesizes[i] != 0) {
Stephen Hemminger187b5182008-01-12 20:55:55 -08001888 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001889 pointers += (1<<i) * stat->nodesizes[i];
1890 }
1891 seq_putc(seq, '\n');
Stephen Hemminger187b5182008-01-12 20:55:55 -08001892 seq_printf(seq, "\tPointers: %u\n", pointers);
Robert Olsson19baf832005-06-21 12:43:18 -07001893
Alexander Duyckadaf9812014-12-31 10:55:47 -08001894 bytes += sizeof(struct tnode *) * pointers;
Stephen Hemminger187b5182008-01-12 20:55:55 -08001895 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
1896 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001897}
Robert Olsson19baf832005-06-21 12:43:18 -07001898
1899#ifdef CONFIG_IP_FIB_TRIE_STATS
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001900static void trie_show_usage(struct seq_file *seq,
Alexander Duyck8274a972014-12-31 10:55:29 -08001901 const struct trie_use_stats __percpu *stats)
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001902{
Alexander Duyck8274a972014-12-31 10:55:29 -08001903 struct trie_use_stats s = { 0 };
1904 int cpu;
1905
1906 /* loop through all of the CPUs and gather up the stats */
1907 for_each_possible_cpu(cpu) {
1908 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
1909
1910 s.gets += pcpu->gets;
1911 s.backtrack += pcpu->backtrack;
1912 s.semantic_match_passed += pcpu->semantic_match_passed;
1913 s.semantic_match_miss += pcpu->semantic_match_miss;
1914 s.null_node_hit += pcpu->null_node_hit;
1915 s.resize_node_skipped += pcpu->resize_node_skipped;
1916 }
1917
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001918 seq_printf(seq, "\nCounters:\n---------\n");
Alexander Duyck8274a972014-12-31 10:55:29 -08001919 seq_printf(seq, "gets = %u\n", s.gets);
1920 seq_printf(seq, "backtracks = %u\n", s.backtrack);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001921 seq_printf(seq, "semantic match passed = %u\n",
Alexander Duyck8274a972014-12-31 10:55:29 -08001922 s.semantic_match_passed);
1923 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
1924 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
1925 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
Robert Olsson19baf832005-06-21 12:43:18 -07001926}
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08001927#endif /* CONFIG_IP_FIB_TRIE_STATS */
1928
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001929static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001930{
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001931 if (tb->tb_id == RT_TABLE_LOCAL)
1932 seq_puts(seq, "Local:\n");
1933 else if (tb->tb_id == RT_TABLE_MAIN)
1934 seq_puts(seq, "Main:\n");
1935 else
1936 seq_printf(seq, "Id %d:\n", tb->tb_id);
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001937}
Robert Olsson19baf832005-06-21 12:43:18 -07001938
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001939
Robert Olsson19baf832005-06-21 12:43:18 -07001940static int fib_triestat_seq_show(struct seq_file *seq, void *v)
1941{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001942 struct net *net = (struct net *)seq->private;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001943 unsigned int h;
Eric W. Biederman877a9bf2007-12-07 00:47:47 -08001944
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08001945 seq_printf(seq,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001946 "Basic info: size of leaf:"
1947 " %Zd bytes, size of tnode: %Zd bytes.\n",
Alexander Duyckadaf9812014-12-31 10:55:47 -08001948 sizeof(struct tnode), sizeof(struct tnode));
Olof Johansson91b9a272005-08-09 20:24:39 -07001949
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001950 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1951 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001952 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001953
Sasha Levinb67bfe02013-02-27 17:06:00 -08001954 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001955 struct trie *t = (struct trie *) tb->tb_data;
1956 struct trie_stat stat;
1957
1958 if (!t)
1959 continue;
1960
1961 fib_table_print(seq, tb);
1962
1963 trie_collect_stats(t, &stat);
1964 trie_show_stats(seq, &stat);
1965#ifdef CONFIG_IP_FIB_TRIE_STATS
Alexander Duyck8274a972014-12-31 10:55:29 -08001966 trie_show_usage(seq, t->stats);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001967#endif
1968 }
1969 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001970
Robert Olsson19baf832005-06-21 12:43:18 -07001971 return 0;
1972}
1973
Robert Olsson19baf832005-06-21 12:43:18 -07001974static int fib_triestat_seq_open(struct inode *inode, struct file *file)
1975{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07001976 return single_open_net(inode, file, fib_triestat_seq_show);
Denis V. Lunev1c340b22008-01-10 03:27:17 -08001977}
1978
Arjan van de Ven9a321442007-02-12 00:55:35 -08001979static const struct file_operations fib_triestat_fops = {
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001980 .owner = THIS_MODULE,
1981 .open = fib_triestat_seq_open,
1982 .read = seq_read,
1983 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07001984 .release = single_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07001985};
1986
Alexander Duyckadaf9812014-12-31 10:55:47 -08001987static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
Robert Olsson19baf832005-06-21 12:43:18 -07001988{
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09001989 struct fib_trie_iter *iter = seq->private;
1990 struct net *net = seq_file_net(seq);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07001991 loff_t idx = 0;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001992 unsigned int h;
Robert Olsson19baf832005-06-21 12:43:18 -07001993
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001994 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1995 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07001996 struct fib_table *tb;
1997
Sasha Levinb67bfe02013-02-27 17:06:00 -08001998 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08001999 struct tnode *n;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002000
2001 for (n = fib_trie_get_first(iter,
2002 (struct trie *) tb->tb_data);
2003 n; n = fib_trie_get_next(iter))
2004 if (pos == idx++) {
2005 iter->tb = tb;
2006 return n;
2007 }
2008 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002009 }
Robert Olsson19baf832005-06-21 12:43:18 -07002010
Robert Olsson19baf832005-06-21 12:43:18 -07002011 return NULL;
2012}
2013
2014static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002015 __acquires(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002016{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002017 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002018 return fib_trie_get_idx(seq, *pos);
Robert Olsson19baf832005-06-21 12:43:18 -07002019}
2020
2021static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2022{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002023 struct fib_trie_iter *iter = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002024 struct net *net = seq_file_net(seq);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002025 struct fib_table *tb = iter->tb;
2026 struct hlist_node *tb_node;
2027 unsigned int h;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002028 struct tnode *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002029
Robert Olsson19baf832005-06-21 12:43:18 -07002030 ++*pos;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002031 /* next node in same table */
2032 n = fib_trie_get_next(iter);
2033 if (n)
2034 return n;
Olof Johansson91b9a272005-08-09 20:24:39 -07002035
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002036 /* walk rest of this hash chain */
2037 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
Eric Dumazet0a5c0472011-03-31 01:51:35 -07002038 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002039 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2040 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2041 if (n)
2042 goto found;
2043 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002044
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002045 /* new hash chain */
2046 while (++h < FIB_TABLE_HASHSZ) {
2047 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
Sasha Levinb67bfe02013-02-27 17:06:00 -08002048 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002049 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2050 if (n)
2051 goto found;
2052 }
2053 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002054 return NULL;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002055
2056found:
2057 iter->tb = tb;
2058 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002059}
2060
2061static void fib_trie_seq_stop(struct seq_file *seq, void *v)
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08002062 __releases(RCU)
Robert Olsson19baf832005-06-21 12:43:18 -07002063{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002064 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002065}
2066
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002067static void seq_indent(struct seq_file *seq, int n)
2068{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002069 while (n-- > 0)
2070 seq_puts(seq, " ");
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002071}
Robert Olsson19baf832005-06-21 12:43:18 -07002072
Eric Dumazet28d36e32008-01-14 23:09:56 -08002073static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002074{
Stephen Hemminger132adf52007-03-08 20:44:43 -08002075 switch (s) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002076 case RT_SCOPE_UNIVERSE: return "universe";
2077 case RT_SCOPE_SITE: return "site";
2078 case RT_SCOPE_LINK: return "link";
2079 case RT_SCOPE_HOST: return "host";
2080 case RT_SCOPE_NOWHERE: return "nowhere";
2081 default:
Eric Dumazet28d36e32008-01-14 23:09:56 -08002082 snprintf(buf, len, "scope=%d", s);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002083 return buf;
2084 }
2085}
2086
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -07002087static const char *const rtn_type_names[__RTN_MAX] = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002088 [RTN_UNSPEC] = "UNSPEC",
2089 [RTN_UNICAST] = "UNICAST",
2090 [RTN_LOCAL] = "LOCAL",
2091 [RTN_BROADCAST] = "BROADCAST",
2092 [RTN_ANYCAST] = "ANYCAST",
2093 [RTN_MULTICAST] = "MULTICAST",
2094 [RTN_BLACKHOLE] = "BLACKHOLE",
2095 [RTN_UNREACHABLE] = "UNREACHABLE",
2096 [RTN_PROHIBIT] = "PROHIBIT",
2097 [RTN_THROW] = "THROW",
2098 [RTN_NAT] = "NAT",
2099 [RTN_XRESOLVE] = "XRESOLVE",
2100};
2101
Eric Dumazeta034ee32010-09-09 23:32:28 +00002102static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002103{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002104 if (t < __RTN_MAX && rtn_type_names[t])
2105 return rtn_type_names[t];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002106 snprintf(buf, len, "type %u", t);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002107 return buf;
2108}
2109
2110/* Pretty print the trie */
Robert Olsson19baf832005-06-21 12:43:18 -07002111static int fib_trie_seq_show(struct seq_file *seq, void *v)
2112{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002113 const struct fib_trie_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002114 struct tnode *n = v;
Robert Olsson19baf832005-06-21 12:43:18 -07002115
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002116 if (!node_parent_rcu(n))
2117 fib_table_print(seq, iter->tb);
Robert Olsson095b8502007-01-26 19:06:01 -08002118
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002119 if (IS_TNODE(n)) {
Alexander Duyckadaf9812014-12-31 10:55:47 -08002120 __be32 prf = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002121
Alexander Duycke9b44012014-12-31 10:56:12 -08002122 seq_indent(seq, iter->depth-1);
2123 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2124 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2125 n->full_children, n->empty_children);
Olof Johansson91b9a272005-08-09 20:24:39 -07002126 } else {
Stephen Hemminger13280422008-01-22 21:54:37 -08002127 struct leaf_info *li;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002128 __be32 val = htonl(n->key);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002129
2130 seq_indent(seq, iter->depth);
Harvey Harrison673d57e2008-10-31 00:53:57 -07002131 seq_printf(seq, " |-- %pI4\n", &val);
Eric Dumazet28d36e32008-01-14 23:09:56 -08002132
Alexander Duyckadaf9812014-12-31 10:55:47 -08002133 hlist_for_each_entry_rcu(li, &n->list, hlist) {
Stephen Hemminger13280422008-01-22 21:54:37 -08002134 struct fib_alias *fa;
Eric Dumazet28d36e32008-01-14 23:09:56 -08002135
Stephen Hemminger13280422008-01-22 21:54:37 -08002136 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2137 char buf1[32], buf2[32];
Eric Dumazet28d36e32008-01-14 23:09:56 -08002138
Stephen Hemminger13280422008-01-22 21:54:37 -08002139 seq_indent(seq, iter->depth+1);
2140 seq_printf(seq, " /%d %s %s", li->plen,
2141 rtn_scope(buf1, sizeof(buf1),
David S. Miller37e826c2011-03-24 18:06:47 -07002142 fa->fa_info->fib_scope),
Stephen Hemminger13280422008-01-22 21:54:37 -08002143 rtn_type(buf2, sizeof(buf2),
2144 fa->fa_type));
2145 if (fa->fa_tos)
Denis V. Lunevb9c4d822008-02-05 02:58:45 -08002146 seq_printf(seq, " tos=%d", fa->fa_tos);
Stephen Hemminger13280422008-01-22 21:54:37 -08002147 seq_putc(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002148 }
2149 }
Robert Olsson19baf832005-06-21 12:43:18 -07002150 }
2151
2152 return 0;
2153}
2154
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002155static const struct seq_operations fib_trie_seq_ops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002156 .start = fib_trie_seq_start,
2157 .next = fib_trie_seq_next,
2158 .stop = fib_trie_seq_stop,
2159 .show = fib_trie_seq_show,
Robert Olsson19baf832005-06-21 12:43:18 -07002160};
2161
2162static int fib_trie_seq_open(struct inode *inode, struct file *file)
2163{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002164 return seq_open_net(inode, file, &fib_trie_seq_ops,
2165 sizeof(struct fib_trie_iter));
Robert Olsson19baf832005-06-21 12:43:18 -07002166}
2167
Arjan van de Ven9a321442007-02-12 00:55:35 -08002168static const struct file_operations fib_trie_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002169 .owner = THIS_MODULE,
2170 .open = fib_trie_seq_open,
2171 .read = seq_read,
2172 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002173 .release = seq_release_net,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002174};
2175
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002176struct fib_route_iter {
2177 struct seq_net_private p;
2178 struct trie *main_trie;
2179 loff_t pos;
2180 t_key key;
2181};
2182
Alexander Duyckadaf9812014-12-31 10:55:47 -08002183static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002184{
Alexander Duyckadaf9812014-12-31 10:55:47 -08002185 struct tnode *l = NULL;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002186 struct trie *t = iter->main_trie;
2187
2188 /* use cached location of last found key */
2189 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2190 pos -= iter->pos;
2191 else {
2192 iter->pos = 0;
2193 l = trie_firstleaf(t);
2194 }
2195
2196 while (l && pos-- > 0) {
2197 iter->pos++;
2198 l = trie_nextleaf(l);
2199 }
2200
2201 if (l)
2202 iter->key = l->key; /* remember it */
2203 else
2204 iter->pos = 0; /* forget it */
2205
2206 return l;
2207}
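/* The iterator remembers the last position and key it handed out so that a
 * continued /proc/net/route read can resume via fib_find_node() instead of
 * rewalking the trie from the first leaf on every chunk.
 */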
2208
2209static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2210 __acquires(RCU)
2211{
2212 struct fib_route_iter *iter = seq->private;
2213 struct fib_table *tb;
2214
2215 rcu_read_lock();
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002216 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002217 if (!tb)
2218 return NULL;
2219
2220 iter->main_trie = (struct trie *) tb->tb_data;
2221 if (*pos == 0)
2222 return SEQ_START_TOKEN;
2223 else
2224 return fib_route_get_idx(iter, *pos - 1);
2225}
2226
2227static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2228{
2229 struct fib_route_iter *iter = seq->private;
Alexander Duyckadaf9812014-12-31 10:55:47 -08002230 struct tnode *l = v;
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002231
2232 ++*pos;
2233 if (v == SEQ_START_TOKEN) {
2234 iter->pos = 0;
2235 l = trie_firstleaf(iter->main_trie);
2236 } else {
2237 iter->pos++;
2238 l = trie_nextleaf(l);
2239 }
2240
2241 if (l)
2242 iter->key = l->key;
2243 else
2244 iter->pos = 0;
2245 return l;
2246}
2247
2248static void fib_route_seq_stop(struct seq_file *seq, void *v)
2249 __releases(RCU)
2250{
2251 rcu_read_unlock();
2252}
2253
Eric Dumazeta034ee32010-09-09 23:32:28 +00002254static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002255{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002256 unsigned int flags = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002257
Eric Dumazeta034ee32010-09-09 23:32:28 +00002258 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2259 flags = RTF_REJECT;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002260 if (fi && fi->fib_nh->nh_gw)
2261 flags |= RTF_GATEWAY;
Al Viro32ab5f82006-09-26 22:21:45 -07002262 if (mask == htonl(0xFFFFFFFF))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002263 flags |= RTF_HOST;
2264 flags |= RTF_UP;
2265 return flags;
2266}
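/* For example, an RTN_UNREACHABLE host route (mask 255.255.255.255) with no
 * fib_info is reported as RTF_REJECT | RTF_HOST | RTF_UP.
 */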
2267
2268/*
2269 * This outputs /proc/net/route.
2270 * The format of the file is not supposed to be changed
Eric Dumazeta034ee32010-09-09 23:32:28 +00002271 * and needs to be the same as the fib_hash output to avoid breaking
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002272 * legacy utilities
2273 */
2274static int fib_route_seq_show(struct seq_file *seq, void *v)
2275{
Alexander Duyckadaf9812014-12-31 10:55:47 -08002276 struct tnode *l = v;
Stephen Hemminger13280422008-01-22 21:54:37 -08002277 struct leaf_info *li;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002278
2279 if (v == SEQ_START_TOKEN) {
2280 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2281 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2282 "\tWindow\tIRTT");
2283 return 0;
2284 }
2285
Sasha Levinb67bfe02013-02-27 17:06:00 -08002286 hlist_for_each_entry_rcu(li, &l->list, hlist) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002287 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07002288 __be32 mask, prefix;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002289
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002290 mask = inet_make_mask(li->plen);
2291 prefix = htonl(l->key);
2292
2293 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
Herbert Xu1371e372005-10-15 09:42:39 +10002294 const struct fib_info *fi = fa->fa_info;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002295 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002296
2297 if (fa->fa_type == RTN_BROADCAST
2298 || fa->fa_type == RTN_MULTICAST)
2299 continue;
2300
Tetsuo Handa652586d2013-11-14 14:31:57 -08002301 seq_setwidth(seq, 127);
2302
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002303 if (fi)
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002304 seq_printf(seq,
2305 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
Tetsuo Handa652586d2013-11-14 14:31:57 -08002306 "%d\t%08X\t%d\t%u\t%u",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002307 fi->fib_dev ? fi->fib_dev->name : "*",
2308 prefix,
2309 fi->fib_nh->nh_gw, flags, 0, 0,
2310 fi->fib_priority,
2311 mask,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002312 (fi->fib_advmss ?
2313 fi->fib_advmss + 40 : 0),
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002314 fi->fib_window,
Tetsuo Handa652586d2013-11-14 14:31:57 -08002315 fi->fib_rtt >> 3);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002316 else
Pavel Emelyanov5e659e42008-04-24 01:02:16 -07002317 seq_printf(seq,
2318 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
Tetsuo Handa652586d2013-11-14 14:31:57 -08002319 "%d\t%08X\t%d\t%u\t%u",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002320 prefix, 0, flags, 0, 0, 0,
Tetsuo Handa652586d2013-11-14 14:31:57 -08002321 mask, 0, 0, 0);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002322
Tetsuo Handa652586d2013-11-14 14:31:57 -08002323 seq_pad(seq, '\n');
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002324 }
2325 }
2326
2327 return 0;
2328}
2329
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002330static const struct seq_operations fib_route_seq_ops = {
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002331 .start = fib_route_seq_start,
2332 .next = fib_route_seq_next,
2333 .stop = fib_route_seq_stop,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002334 .show = fib_route_seq_show,
2335};
2336
2337static int fib_route_seq_open(struct inode *inode, struct file *file)
2338{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002339 return seq_open_net(inode, file, &fib_route_seq_ops,
Stephen Hemminger8315f5d2008-02-11 21:14:39 -08002340 sizeof(struct fib_route_iter));
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002341}
2342
Arjan van de Ven9a321442007-02-12 00:55:35 -08002343static const struct file_operations fib_route_fops = {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002344 .owner = THIS_MODULE,
2345 .open = fib_route_seq_open,
2346 .read = seq_read,
2347 .llseek = seq_lseek,
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002348 .release = seq_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002349};
2350
Denis V. Lunev61a02652008-01-10 03:21:09 -08002351int __net_init fib_proc_init(struct net *net)
Robert Olsson19baf832005-06-21 12:43:18 -07002352{
Gao fengd4beaa62013-02-18 01:34:54 +00002353 if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002354 goto out1;
2355
Gao fengd4beaa62013-02-18 01:34:54 +00002356 if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
2357 &fib_triestat_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002358 goto out2;
2359
Gao fengd4beaa62013-02-18 01:34:54 +00002360 if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002361 goto out3;
2362
Robert Olsson19baf832005-06-21 12:43:18 -07002363 return 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002364
2365out3:
Gao fengece31ff2013-02-18 01:34:56 +00002366 remove_proc_entry("fib_triestat", net->proc_net);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002367out2:
Gao fengece31ff2013-02-18 01:34:56 +00002368 remove_proc_entry("fib_trie", net->proc_net);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002369out1:
2370 return -ENOMEM;
Robert Olsson19baf832005-06-21 12:43:18 -07002371}
2372
Denis V. Lunev61a02652008-01-10 03:21:09 -08002373void __net_exit fib_proc_exit(struct net *net)
Robert Olsson19baf832005-06-21 12:43:18 -07002374{
Gao fengece31ff2013-02-18 01:34:56 +00002375 remove_proc_entry("fib_trie", net->proc_net);
2376 remove_proc_entry("fib_triestat", net->proc_net);
2377 remove_proc_entry("route", net->proc_net);
Robert Olsson19baf832005-06-21 12:43:18 -07002378}
2379
2380#endif /* CONFIG_PROC_FS */