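/*
 * In-kernel test/benchmark for the interval tree: measures insert/remove
 * and overlap-search cost in CPU cycles and reports the results via printk.
 */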
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interval_tree.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/timex.h>

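/*
 * Declare a module parameter: a static variable, a read-only (0444)
 * module_param registration, and a modinfo description string.
 */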
#define __param(type, name, init, msg)		\
	static type name = init;		\
	module_param(name, type, 0444);		\
	MODULE_PARM_DESC(name, msg);

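/* Tunables: tree size, iteration counts for each benchmark phase, and the largest endpoint. */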
__param(int, nnodes, 100, "Number of nodes in the interval tree");
__param(int, perf_loops, 1000, "Number of iterations modifying the tree");

__param(int, nsearches, 100, "Number of searches to the interval tree");
__param(int, search_loops, 1000, "Number of iterations searching the tree");
__param(bool, search_all, false, "Searches will iterate all nodes in the tree");

__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");

static struct rb_root_cached root = RB_ROOT_CACHED;
static struct interval_tree_node *nodes = NULL;
static u32 *queries = NULL;

static struct rnd_state rnd;

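/*
 * Count the nodes whose intervals overlap [start, last], walking them
 * with the interval tree iterator.
 */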
static inline unsigned long
search(struct rb_root_cached *root, unsigned long start, unsigned long last)
{
	struct interval_tree_node *node;
	unsigned long results = 0;

	for (node = interval_tree_iter_first(root, start, last); node;
	     node = interval_tree_iter_next(node, start, last))
		results++;
	return results;
}

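/*
 * Fill the node array with random [a, b] intervals (a < b) bounded by
 * max_endpoint, and pick the random query points used by the search
 * benchmark.
 */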
static void init(void)
{
	int i;

	for (i = 0; i < nnodes; i++) {
		u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
		u32 a = (prandom_u32_state(&rnd) >> 4) % b;

		nodes[i].start = a;
		nodes[i].last = b;
	}

	/*
	 * Limit the search scope to what the user defined.
	 * Otherwise we are merely measuring empty walks,
	 * which is pointless.
	 */
	for (i = 0; i < nsearches; i++)
		queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
}

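/*
 * Module init doubles as the benchmark driver: time perf_loops rounds of
 * bulk insert/remove, then time search_loops rounds of overlap queries,
 * and report the average cycles per round.
 */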
static int interval_tree_test_init(void)
{
	int i, j;
	unsigned long results;
	cycles_t time1, time2, time;

	nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
	if (!queries) {
		kfree(nodes);
		return -ENOMEM;
	}

	printk(KERN_ALERT "interval tree insert/remove");

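	/* A fixed seed keeps the node layout and queries reproducible across runs. */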
	prandom_seed_state(&rnd, 3141592653589793238ULL);
	init();

	time1 = get_cycles();

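	/* Repeatedly insert and then remove every node to exercise tree updates. */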
	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			interval_tree_insert(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			interval_tree_remove(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> %llu cycles\n", (unsigned long long)time);

	printk(KERN_ALERT "interval tree search");

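	/* Build the tree once, outside the timed region; the loop below measures queries only. */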
	for (j = 0; j < nnodes; j++)
		interval_tree_insert(nodes + j, &root);

	time1 = get_cycles();

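	/* Each round issues nsearches queries; search_all widens every query to the full endpoint range. */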
	results = 0;
	for (i = 0; i < search_loops; i++)
		for (j = 0; j < nsearches; j++) {
			unsigned long start = search_all ? 0 : queries[j];
			unsigned long last = search_all ? max_endpoint : queries[j];

			results += search(&root, start, last);
		}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, search_loops);
	results = div_u64(results, search_loops);
	printk(" -> %llu cycles (%lu results)\n",
	       (unsigned long long)time, results);

	kfree(queries);
	kfree(nodes);

	return -EAGAIN; /* Failing will directly unload the module */
}

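/*
 * init() frees its allocations and deliberately returns an error, so the
 * module is never left loaded; the exit handler below only logs.
 */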
static void interval_tree_test_exit(void)
{
	printk(KERN_ALERT "test exit\n");
}

module_init(interval_tree_test_init)
module_exit(interval_tree_test_exit)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Interval Tree test");