blob: 5deee56434fc5f34499d7fd17b1f47fd8084b8b6 [file] [log] [blame]
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 * Distributed under the GNU GPL license version 2.
 */
6
7#include <linux/slab.h>
Arne Jansenda5c8132011-09-13 12:29:12 +02008#include "ulist.h"
Wang Shilong4c7a6f72014-01-29 00:25:34 +08009#include "ctree.h"
Arne Jansenda5c8132011-09-13 12:29:12 +020010
/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports is adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */
39
40/**
41 * ulist_init - freshly initialize a ulist
42 * @ulist: the ulist to initialize
43 *
44 * Note: don't use this function to init an already used ulist, use
45 * ulist_reinit instead.
46 */
47void ulist_init(struct ulist *ulist)
48{
Wang Shilong4c7a6f72014-01-29 00:25:34 +080049 INIT_LIST_HEAD(&ulist->nodes);
Wang Shilongf7f82b82013-04-12 12:12:17 +000050 ulist->root = RB_ROOT;
Wang Shilong4c7a6f72014-01-29 00:25:34 +080051 ulist->nnodes = 0;
Arne Jansenda5c8132011-09-13 12:29:12 +020052}
Arne Jansenda5c8132011-09-13 12:29:12 +020053
54/**
55 * ulist_fini - free up additionally allocated memory for the ulist
56 * @ulist: the ulist from which to free the additional memory
57 *
58 * This is useful in cases where the base 'struct ulist' has been statically
59 * allocated.
60 */
David Sterba9d0379332017-02-13 13:40:16 +010061void ulist_fini(struct ulist *ulist)
Arne Jansenda5c8132011-09-13 12:29:12 +020062{
Wang Shilong4c7a6f72014-01-29 00:25:34 +080063 struct ulist_node *node;
64 struct ulist_node *next;
65
66 list_for_each_entry_safe(node, next, &ulist->nodes, list) {
67 kfree(node);
68 }
Wang Shilongf7f82b82013-04-12 12:12:17 +000069 ulist->root = RB_ROOT;
Wang Shilong4c7a6f72014-01-29 00:25:34 +080070 INIT_LIST_HEAD(&ulist->nodes);
Arne Jansenda5c8132011-09-13 12:29:12 +020071}
Arne Jansenda5c8132011-09-13 12:29:12 +020072
/**
 * ulist_reinit - reset a ulist so it can be reused
 * @ulist: ulist to recycle
 *
 * Drops all elements currently stored in @ulist and returns it to the
 * same state a freshly initialized ulist has.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_fini(ulist);
	ulist_init(ulist);
}
Arne Jansenda5c8132011-09-13 12:29:12 +020085
86/**
87 * ulist_alloc - dynamically allocate a ulist
88 * @gfp_mask: allocation flags to for base allocation
89 *
90 * The allocated ulist will be returned in an initialized state.
91 */
Daniel J Blueman2eec6c82012-04-26 00:37:14 +080092struct ulist *ulist_alloc(gfp_t gfp_mask)
Arne Jansenda5c8132011-09-13 12:29:12 +020093{
94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
95
96 if (!ulist)
97 return NULL;
98
99 ulist_init(ulist);
100
101 return ulist;
102}
Arne Jansenda5c8132011-09-13 12:29:12 +0200103
/**
 * ulist_free - free a dynamically allocated ulist
 * @ulist: ulist to free, may be NULL
 *
 * Releases both the elements and the ulist itself. It is not necessary
 * to call ulist_fini before; passing NULL is a no-op.
 */
void ulist_free(struct ulist *ulist)
{
	if (ulist) {
		ulist_fini(ulist);
		kfree(ulist);
	}
}
Arne Jansenda5c8132011-09-13 12:29:12 +0200117
Wang Shilongf7f82b82013-04-12 12:12:17 +0000118static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
119{
120 struct rb_node *n = ulist->root.rb_node;
121 struct ulist_node *u = NULL;
122
123 while (n) {
124 u = rb_entry(n, struct ulist_node, rb_node);
125 if (u->val < val)
126 n = n->rb_right;
127 else if (u->val > val)
128 n = n->rb_left;
129 else
130 return u;
131 }
132 return NULL;
133}
134
Qu Wenruod4b80402015-04-20 09:26:02 +0800135static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
136{
137 rb_erase(&node->rb_node, &ulist->root);
138 list_del(&node->list);
139 kfree(node);
140 BUG_ON(ulist->nnodes == 0);
141 ulist->nnodes--;
142}
143
Wang Shilongf7f82b82013-04-12 12:12:17 +0000144static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
145{
146 struct rb_node **p = &ulist->root.rb_node;
147 struct rb_node *parent = NULL;
148 struct ulist_node *cur = NULL;
149
150 while (*p) {
151 parent = *p;
152 cur = rb_entry(parent, struct ulist_node, rb_node);
153
154 if (cur->val < ins->val)
155 p = &(*p)->rb_right;
156 else if (cur->val > ins->val)
157 p = &(*p)->rb_left;
158 else
159 return -EEXIST;
160 }
161 rb_link_node(&ins->rb_node, parent, p);
162 rb_insert_color(&ins->rb_node, &ulist->root);
163 return 0;
164}
165
Arne Jansenda5c8132011-09-13 12:29:12 +0200166/**
167 * ulist_add - add an element to the ulist
168 * @ulist: ulist to add the element to
169 * @val: value to add to ulist
170 * @aux: auxiliary value to store along with val
171 * @gfp_mask: flags to use for allocation
172 *
173 * Note: locking must be provided by the caller. In case of rwlocks write
174 * locking is needed
175 *
176 * Add an element to a ulist. The @val will only be added if it doesn't
177 * already exist. If it is added, the auxiliary value @aux is stored along with
178 * it. In case @val already exists in the ulist, @aux is ignored, even if
179 * it differs from the already stored value.
180 *
181 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
182 * inserted.
183 * In case of allocation failure -ENOMEM is returned and the ulist stays
184 * unaltered.
185 */
Alexander Block34d73f52012-07-28 16:18:58 +0200186int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
Arne Jansenda5c8132011-09-13 12:29:12 +0200187{
Jan Schmidt33019582012-05-30 18:05:21 +0200188 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
189}
190
Alexander Block34d73f52012-07-28 16:18:58 +0200191int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
192 u64 *old_aux, gfp_t gfp_mask)
Jan Schmidt33019582012-05-30 18:05:21 +0200193{
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800194 int ret;
195 struct ulist_node *node;
196
Wang Shilongf7f82b82013-04-12 12:12:17 +0000197 node = ulist_rbtree_search(ulist, val);
198 if (node) {
199 if (old_aux)
200 *old_aux = node->aux;
201 return 0;
Arne Jansenda5c8132011-09-13 12:29:12 +0200202 }
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800203 node = kmalloc(sizeof(*node), gfp_mask);
204 if (!node)
205 return -ENOMEM;
Arne Jansenda5c8132011-09-13 12:29:12 +0200206
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800207 node->val = val;
208 node->aux = aux;
Liu Bo35f03992013-06-28 12:37:45 +0800209
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800210 ret = ulist_rbtree_insert(ulist, node);
211 ASSERT(!ret);
212 list_add_tail(&node->list, &ulist->nodes);
213 ulist->nnodes++;
Arne Jansenda5c8132011-09-13 12:29:12 +0200214
215 return 1;
216}
Arne Jansenda5c8132011-09-13 12:29:12 +0200217
Qu Wenruod4b80402015-04-20 09:26:02 +0800218/*
219 * ulist_del - delete one node from ulist
220 * @ulist: ulist to remove node from
221 * @val: value to delete
222 * @aux: aux to delete
223 *
224 * The deletion will only be done when *BOTH* val and aux matches.
225 * Return 0 for successful delete.
226 * Return > 0 for not found.
227 */
228int ulist_del(struct ulist *ulist, u64 val, u64 aux)
229{
230 struct ulist_node *node;
231
232 node = ulist_rbtree_search(ulist, val);
233 /* Not found */
234 if (!node)
235 return 1;
236
237 if (node->aux != aux)
238 return 1;
239
240 /* Found and delete */
241 ulist_rbtree_erase(ulist, node);
242 return 0;
243}
244
Arne Jansenda5c8132011-09-13 12:29:12 +0200245/**
246 * ulist_next - iterate ulist
247 * @ulist: ulist to iterate
Jan Schmidtcd1b4132012-05-22 14:56:50 +0200248 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
Arne Jansenda5c8132011-09-13 12:29:12 +0200249 *
250 * Note: locking must be provided by the caller. In case of rwlocks only read
251 * locking is needed
252 *
Jan Schmidtcd1b4132012-05-22 14:56:50 +0200253 * This function is used to iterate an ulist.
254 * It returns the next element from the ulist or %NULL when the
Arne Jansenda5c8132011-09-13 12:29:12 +0200255 * end is reached. No guarantee is made with respect to the order in which
256 * the elements are returned. They might neither be returned in order of
257 * addition nor in ascending order.
258 * It is allowed to call ulist_add during an enumeration. Newly added items
259 * are guaranteed to show up in the running enumeration.
260 */
Jan Schmidtcd1b4132012-05-22 14:56:50 +0200261struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
Arne Jansenda5c8132011-09-13 12:29:12 +0200262{
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800263 struct ulist_node *node;
Arne Jansenda5c8132011-09-13 12:29:12 +0200264
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800265 if (list_empty(&ulist->nodes))
266 return NULL;
267 if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
268 return NULL;
269 if (uiter->cur_list) {
270 uiter->cur_list = uiter->cur_list->next;
271 } else {
272 uiter->cur_list = ulist->nodes.next;
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800273 }
274 node = list_entry(uiter->cur_list, struct ulist_node, list);
Wang Shilong4c7a6f72014-01-29 00:25:34 +0800275 return node;
Arne Jansenda5c8132011-09-13 12:29:12 +0200276}