/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Latched RB-trees
 *
 * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
 *
 * Since RB-trees have non-atomic modifications they're not immediately suited
 * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
 * lockless lookups, we cannot guarantee they return a correct result.
 *
 * The simplest solution is a seqlock + RB-tree; this allows lockless lookups,
 * but has the constraint (inherent to the seqlock) that read sides cannot
 * nest in write sides.
 *
 * If we need to allow unconditional lookups (say, as required for NMI context
 * usage) we need a more complex setup; this data structure provides this by
 * employing the latch technique -- see @raw_write_seqcount_latch -- to
 * implement a latched RB-tree which does allow for unconditional lookups by
 * virtue of always having (at least) one stable copy of the tree.
 *
 * However, while we have the guarantee that there is at all times one stable
 * copy, this does not guarantee an iteration will not observe modifications.
 * What might have been a stable copy at the start of the iteration need not
 * remain so for the duration of the iteration.
 *
 * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
 * see the comment in lib/rbtree.c. Note however that we only require the first
 * condition -- not seeing partial stores -- because the latch technique
 * isolates us from loops. If we were to interrupt a modification, the lookup
 * would be pointed at the stable tree and complete while the modification was
 * halted.
 */

#ifndef RB_TREE_LATCH_H
#define RB_TREE_LATCH_H

#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

struct latch_tree_node {
	struct rb_node node[2];
};

struct latch_tree_root {
	seqcount_t seq;
	struct rb_root tree[2];
};

/**
 * latch_tree_ops - operators to define the tree order
 * @less: used for insertion; provides the (partial) order between two elements.
 * @comp: used for lookups; provides the order between the search key and an element.
 *
 * The operators are related like:
 *
 *	comp(a->key,b) < 0  := less(a,b)
 *	comp(a->key,b) > 0  := less(b,a)
 *	comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
 *
 * If these operators define a partial order on the elements we make no
 * guarantee on which of the elements matching the key is found. See
 * latch_tree_find().
 */
struct latch_tree_ops {
	bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
	int (*comp)(void *key, struct latch_tree_node *b);
};
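
/*
 * Example (illustrative only, not part of this API): a hypothetical object
 * embedding a latch_tree_node and keyed by an address could define operators
 * satisfying the relation above roughly as follows:
 *
 *	struct my_obj {
 *		unsigned long		addr;
 *		struct latch_tree_node	ltn;
 *	};
 *
 *	static bool my_less(struct latch_tree_node *a, struct latch_tree_node *b)
 *	{
 *		return container_of(a, struct my_obj, ltn)->addr <
 *		       container_of(b, struct my_obj, ltn)->addr;
 *	}
 *
 *	static int my_comp(void *key, struct latch_tree_node *b)
 *	{
 *		unsigned long addr = (unsigned long)key;
 *		unsigned long obj_addr = container_of(b, struct my_obj, ltn)->addr;
 *
 *		if (addr < obj_addr)
 *			return -1;
 *		if (addr > obj_addr)
 *			return 1;
 *		return 0;
 *	}
 *
 *	static const struct latch_tree_ops my_ops = {
 *		.less = my_less,
 *		.comp = my_comp,
 *	};
 */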

static __always_inline struct latch_tree_node *
__lt_from_rb(struct rb_node *node, int idx)
{
	return container_of(node, struct latch_tree_node, node[idx]);
}

static __always_inline void
__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
	    bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
{
	struct rb_root *root = &ltr->tree[idx];
	struct rb_node **link = &root->rb_node;
	struct rb_node *node = &ltn->node[idx];
	struct rb_node *parent = NULL;
	struct latch_tree_node *ltp;

	while (*link) {
		parent = *link;
		ltp = __lt_from_rb(parent, idx);

		if (less(ltn, ltp))
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node_rcu(node, parent, link);
	rb_insert_color(node, root);
}

static __always_inline void
__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
{
	rb_erase(&ltn->node[idx], &ltr->tree[idx]);
}

static __always_inline struct latch_tree_node *
__lt_find(void *key, struct latch_tree_root *ltr, int idx,
	  int (*comp)(void *key, struct latch_tree_node *node))
{
	struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
	struct latch_tree_node *ltn;
	int c;

	while (node) {
		ltn = __lt_from_rb(node, idx);
		c = comp(key, ltn);

		if (c < 0)
			node = rcu_dereference_raw(node->rb_left);
		else if (c > 0)
			node = rcu_dereference_raw(node->rb_right);
		else
			return ltn;
	}

	return NULL;
}

/**
 * latch_tree_insert() - insert @node into the trees @root
 * @node: nodes to insert
 * @root: trees to insert @node into
 * @ops: operators defining the node order
 *
 * It inserts @node into @root in an ordered fashion such that we can always
 * observe one complete tree. See the comment for raw_write_seqcount_latch().
 *
 * The inserts use rcu_assign_pointer() to publish the element such that the
 * tree structure is stored before we can observe the new @node.
 *
 * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
 * serialized.
 */
static __always_inline void
latch_tree_insert(struct latch_tree_node *node,
		  struct latch_tree_root *root,
		  const struct latch_tree_ops *ops)
{
	raw_write_seqcount_latch(&root->seq);
	__lt_insert(node, root, 0, ops->less);
	raw_write_seqcount_latch(&root->seq);
	__lt_insert(node, root, 1, ops->less);
}
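
/*
 * Usage sketch (illustrative, not part of this API): all modifications must be
 * serialized externally, e.g. by a spinlock owned by the caller. Continuing
 * the hypothetical my_obj/my_ops example above:
 *
 *	static DEFINE_SPINLOCK(my_tree_lock);
 *	static struct latch_tree_root my_root;
 *
 *	static void my_add(struct my_obj *obj)
 *	{
 *		spin_lock(&my_tree_lock);
 *		latch_tree_insert(&obj->ltn, &my_root, &my_ops);
 *		spin_unlock(&my_tree_lock);
 *	}
 */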

/**
 * latch_tree_erase() - removes @node from the trees @root
 * @node: nodes to remove
 * @root: trees to remove @node from
 * @ops: operators defining the node order
 *
 * Removes @node from the trees @root in an ordered fashion such that we can
 * always observe one complete tree. See the comment for
 * raw_write_seqcount_latch().
 *
 * It is assumed that @node will observe one RCU quiescent state before being
 * reused or freed.
 *
 * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
 * serialized.
 */
static __always_inline void
latch_tree_erase(struct latch_tree_node *node,
		 struct latch_tree_root *root,
		 const struct latch_tree_ops *ops)
{
	raw_write_seqcount_latch(&root->seq);
	__lt_erase(node, root, 0);
	raw_write_seqcount_latch(&root->seq);
	__lt_erase(node, root, 1);
}
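
/*
 * Usage sketch (illustrative, not part of this API): removal must be
 * serialized like insertion, and the object may only be reused or freed
 * after an RCU grace period, so that in-flight lockless lookups are done
 * with it. Continuing the hypothetical example:
 *
 *	static void my_del(struct my_obj *obj)
 *	{
 *		spin_lock(&my_tree_lock);
 *		latch_tree_erase(&obj->ltn, &my_root, &my_ops);
 *		spin_unlock(&my_tree_lock);
 *
 *		synchronize_rcu();
 *		kfree(obj);
 *	}
 */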

/**
 * latch_tree_find() - find the node matching @key in the trees @root
 * @key: search key
 * @root: trees to search for @key
 * @ops: operators defining the node order
 *
 * Does a lockless lookup in the trees @root for the node matching @key.
 *
 * It is assumed that this is called while holding the appropriate RCU read
 * side lock.
 *
 * If the operators define a partial order on the elements (i.e. multiple
 * elements can have the same key value) it is undefined which of these
 * elements will be found. Nor is it possible to iterate the tree to find
 * further elements with the same key value.
 *
 * Returns: a pointer to the node matching @key or NULL.
 */
static __always_inline struct latch_tree_node *
latch_tree_find(void *key, struct latch_tree_root *root,
		const struct latch_tree_ops *ops)
{
	struct latch_tree_node *node;
	unsigned int seq;

	do {
		seq = raw_read_seqcount_latch(&root->seq);
		node = __lt_find(key, root, seq & 1, ops->comp);
	} while (read_seqcount_retry(&root->seq, seq));

	return node;
}
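
/*
 * Usage sketch (illustrative, not part of this API): lookups are lockless but
 * must run inside an RCU read-side critical section. Continuing the
 * hypothetical example:
 *
 *	static bool my_contains(unsigned long addr)
 *	{
 *		bool ret;
 *
 *		rcu_read_lock();
 *		ret = latch_tree_find((void *)addr, &my_root, &my_ops) != NULL;
 *		rcu_read_unlock();
 *
 *		return ret;
 *	}
 */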

#endif /* RB_TREE_LATCH_H */