/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
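
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): managing a small aperture with an embedded, zero-initialized node.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};	// must be cleared to 0 before insertion
 *
 *	drm_mm_init(&mm, 0, 64 << 20);
 *	if (drm_mm_insert_node_generic(&mm, &node, 4096, 0, 0,
 *				       DRM_MM_SEARCH_DEFAULT,
 *				       DRM_MM_CREATE_DEFAULT) == 0) {
 *		// node.start now holds the allocated offset
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */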

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(drm_mm_interval_first);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_next(node, start, last);
}
EXPORT_SYMBOL(drm_mm_interval_next);
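
/*
 * Illustrative sketch (hypothetical driver code): walking every node that
 * overlaps a given [start, last] range via the interval-tree helpers above,
 * given a struct drm_mm *mm.
 *
 *	struct drm_mm_node *node;
 *
 *	for (node = drm_mm_interval_first(mm, start, last);
 *	     node;
 *	     node = drm_mm_interval_next(node, start, last))
 *		pr_info("overlap at %#llx + %#llx\n", node->start, node->size);
 */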

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be set up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(&mm->head_node.node_list,
				  typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);
	if (hole_start > node->start || hole_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
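
/*
 * Illustrative sketch (hypothetical driver code; fb_base/fb_size are assumed
 * firmware-provided values, not part of this file): reserving the range the
 * firmware already scans out from, before handing out the rest of the
 * aperture.
 *
 *	struct drm_mm_node *fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
 *	int ret;
 *
 *	if (!fb_node)
 *		return -ENOMEM;
 *	fb_node->start = fb_base;
 *	fb_node->size = fb_size;
 *	ret = drm_mm_reserve_node(&mm, fb_node);
 *	if (ret)	// -ENOSPC: something already overlaps the framebuffer
 *		goto err_free;
 */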
Chris Wilson | 5973c7e | 2012-11-15 11:32:16 +0000 | [diff] [blame] | 363 | |
Daniel Vetter | b0b7af1 | 2011-02-18 17:59:14 +0100 | [diff] [blame] | 364 | /** |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 365 | * drm_mm_insert_node_generic - search for space and insert @node |
| 366 | * @mm: drm_mm to allocate from |
| 367 | * @node: preallocate node to insert |
| 368 | * @size: size of the allocation |
| 369 | * @alignment: alignment of the allocation |
| 370 | * @color: opaque tag value to use for this node |
Lauri Kasanen | 62347f9 | 2014-04-02 20:03:57 +0300 | [diff] [blame] | 371 | * @sflags: flags to fine-tune the allocation search |
| 372 | * @aflags: flags to fine-tune the allocation behavior |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 373 | * |
| 374 | * The preallocated node must be cleared to 0. |
| 375 | * |
| 376 | * Returns: |
| 377 | * 0 on success, -ENOSPC if there's no suitable hole. |
Daniel Vetter | b0b7af1 | 2011-02-18 17:59:14 +0100 | [diff] [blame] | 378 | */ |
Chris Wilson | b810345 | 2012-12-07 20:37:06 +0000 | [diff] [blame] | 379 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 380 | u64 size, unsigned alignment, |
David Herrmann | 31e5d7c | 2013-07-27 13:36:27 +0200 | [diff] [blame] | 381 | unsigned long color, |
Lauri Kasanen | 62347f9 | 2014-04-02 20:03:57 +0300 | [diff] [blame] | 382 | enum drm_mm_search_flags sflags, |
| 383 | enum drm_mm_allocator_flags aflags) |
Daniel Vetter | b0b7af1 | 2011-02-18 17:59:14 +0100 | [diff] [blame] | 384 | { |
| 385 | struct drm_mm_node *hole_node; |
| 386 | |
Chris Wilson | aafdcfd | 2016-08-03 19:26:28 +0100 | [diff] [blame] | 387 | if (WARN_ON(size == 0)) |
| 388 | return -EINVAL; |
| 389 | |
Chris Wilson | b810345 | 2012-12-07 20:37:06 +0000 | [diff] [blame] | 390 | hole_node = drm_mm_search_free_generic(mm, size, alignment, |
Lauri Kasanen | 62347f9 | 2014-04-02 20:03:57 +0300 | [diff] [blame] | 391 | color, sflags); |
Daniel Vetter | b0b7af1 | 2011-02-18 17:59:14 +0100 | [diff] [blame] | 392 | if (!hole_node) |
| 393 | return -ENOSPC; |
| 394 | |
Lauri Kasanen | 62347f9 | 2014-04-02 20:03:57 +0300 | [diff] [blame] | 395 | drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags); |
Daniel Vetter | b0b7af1 | 2011-02-18 17:59:14 +0100 | [diff] [blame] | 396 | return 0; |
| 397 | } |
Chris Wilson | b810345 | 2012-12-07 20:37:06 +0000 | [diff] [blame] | 398 | EXPORT_SYMBOL(drm_mm_insert_node_generic); |
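
/*
 * Illustrative sketch (hypothetical driver code): a top-down, page-aligned
 * allocation, which can help keep the bottom of the range unfragmented.
 *
 *	int ret = drm_mm_insert_node_generic(&mm, node, size, PAGE_SIZE, 0,
 *					     DRM_MM_SEARCH_BELOW,
 *					     DRM_MM_CREATE_TOP);
 *	if (ret)
 *		return ret;	// -ENOSPC: no suitable hole
 */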

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
		list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
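
/*
 * Worked example of the alignment round-up above (illustrative numbers):
 * with start = 0x1003 and alignment = 0x1000, do_div() leaves rem = 3, so
 * start is bumped by 0x1000 - 3 to the next aligned offset, 0x2000. The hole
 * only fits if end >= 0x2000 + size.
 */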

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~(u64)0;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~(u64)0;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
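
/*
 * Illustrative sketch (hypothetical driver code; obj and its node field are
 * assumptions): migrating an allocation into a freshly allocated node, e.g.
 * when the owning object is being reallocated.
 *
 *	struct drm_mm_node *new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
 *
 *	if (!new_node)
 *		return -ENOMEM;
 *	drm_mm_replace_node(&obj->node, new_node);
 *	// obj->node is now unallocated; new_node owns the same range
 */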

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The driver then adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must then walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */
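
/*
 * Illustrative eviction-loop sketch (hypothetical driver code; the LRU list,
 * scan_link and my_evict() helper are assumptions, not part of this file):
 *
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(scan_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// list_add() prepends, so walking scan_list forward removes the
 *	// blocks in exactly the reverse order they were added, as required.
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		bool evict = drm_mm_scan_remove_block(&obj->node);
 *		list_del(&obj->scan_link);
 *		if (found && evict)
 *			my_evict(obj);	// frees obj->node
 *	}
 */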
| 714 | |
| 715 | /** |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 716 | * drm_mm_init_scan - initialize lru scanning |
| 717 | * @mm: drm_mm to scan |
| 718 | * @size: size of the allocation |
| 719 | * @alignment: alignment of the allocation |
| 720 | * @color: opaque tag value to use for the allocation |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 721 | * |
| 722 | * This simply sets up the scanning routines with the parameters for the desired |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 723 | * hole. Note that there's no need to specify allocation flags, since they only |
| 724 | * change the place a node is allocated from within a suitable hole. |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 725 | * |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 726 | * Warning: |
| 727 | * As long as the scan list is non-empty, no other operations than |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 728 | * adding/removing nodes to/from the scan list are allowed. |
| 729 | */ |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 730 | void drm_mm_init_scan(struct drm_mm *mm, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 731 | u64 size, |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 732 | unsigned alignment, |
| 733 | unsigned long color) |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 734 | { |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 735 | mm->scan_color = color; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 736 | mm->scan_alignment = alignment; |
| 737 | mm->scan_size = size; |
| 738 | mm->scanned_blocks = 0; |
| 739 | mm->scan_hit_start = 0; |
Chris Wilson | 901593f | 2012-12-19 16:51:06 +0000 | [diff] [blame] | 740 | mm->scan_hit_end = 0; |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 741 | mm->scan_check_range = 0; |
Daniel Vetter | ae0cec2 | 2011-02-18 17:59:15 +0100 | [diff] [blame] | 742 | mm->prev_scanned_node = NULL; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 743 | } |
| 744 | EXPORT_SYMBOL(drm_mm_init_scan); |
| 745 | |
| 746 | /** |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 747 | * drm_mm_init_scan - initialize range-restricted lru scanning |
| 748 | * @mm: drm_mm to scan |
| 749 | * @size: size of the allocation |
| 750 | * @alignment: alignment of the allocation |
| 751 | * @color: opaque tag value to use for the allocation |
| 752 | * @start: start of the allowed range for the allocation |
| 753 | * @end: end of the allowed range for the allocation |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 754 | * |
| 755 | * This simply sets up the scanning routines with the parameters for the desired |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 756 | * hole. Note that there's no need to specify allocation flags, since they only |
| 757 | * change the place a node is allocated from within a suitable hole. |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 758 | * |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 759 | * Warning: |
| 760 | * As long as the scan list is non-empty, no other operations than |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 761 | * adding/removing nodes to/from the scan list are allowed. |
| 762 | */ |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 763 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 764 | u64 size, |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 765 | unsigned alignment, |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 766 | unsigned long color, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 767 | u64 start, |
| 768 | u64 end) |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 769 | { |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 770 | mm->scan_color = color; |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 771 | mm->scan_alignment = alignment; |
| 772 | mm->scan_size = size; |
| 773 | mm->scanned_blocks = 0; |
| 774 | mm->scan_hit_start = 0; |
Chris Wilson | 901593f | 2012-12-19 16:51:06 +0000 | [diff] [blame] | 775 | mm->scan_hit_end = 0; |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 776 | mm->scan_start = start; |
| 777 | mm->scan_end = end; |
| 778 | mm->scan_check_range = 1; |
Daniel Vetter | ae0cec2 | 2011-02-18 17:59:15 +0100 | [diff] [blame] | 779 | mm->prev_scanned_node = NULL; |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 780 | } |
| 781 | EXPORT_SYMBOL(drm_mm_init_scan_with_range); |
| 782 | |
| 783 | /** |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 784 | * drm_mm_scan_add_block - add a node to the scan list |
| 785 | * @node: drm_mm_node to add |
| 786 | * |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 787 | * Add a node to the scan list that might be freed to make space for the desired |
| 788 | * hole. |
| 789 | * |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 790 | * Returns: |
| 791 | * True if a hole has been found, false otherwise. |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 792 | */ |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 793 | bool drm_mm_scan_add_block(struct drm_mm_node *node) |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 794 | { |
| 795 | struct drm_mm *mm = node->mm; |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 796 | struct drm_mm_node *prev_node; |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 797 | u64 hole_start, hole_end; |
| 798 | u64 adj_start, adj_end; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 799 | |
| 800 | mm->scanned_blocks++; |
| 801 | |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 802 | BUG_ON(node->scanned_block); |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 803 | node->scanned_block = 1; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 804 | |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 805 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, |
| 806 | node_list); |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 807 | |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 808 | node->scanned_preceeds_hole = prev_node->hole_follows; |
| 809 | prev_node->hole_follows = 1; |
| 810 | list_del(&node->node_list); |
| 811 | node->node_list.prev = &prev_node->node_list; |
Daniel Vetter | ae0cec2 | 2011-02-18 17:59:15 +0100 | [diff] [blame] | 812 | node->node_list.next = &mm->prev_scanned_node->node_list; |
| 813 | mm->prev_scanned_node = node; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 814 | |
Chris Wilson | 901593f | 2012-12-19 16:51:06 +0000 | [diff] [blame] | 815 | adj_start = hole_start = drm_mm_hole_node_start(prev_node); |
| 816 | adj_end = hole_end = drm_mm_hole_node_end(prev_node); |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 817 | |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 818 | if (mm->scan_check_range) { |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 819 | if (adj_start < mm->scan_start) |
| 820 | adj_start = mm->scan_start; |
| 821 | if (adj_end > mm->scan_end) |
| 822 | adj_end = mm->scan_end; |
Daniel Vetter | d935cc6 | 2010-09-16 15:13:11 +0200 | [diff] [blame] | 823 | } |
| 824 | |
Chris Wilson | 901593f | 2012-12-19 16:51:06 +0000 | [diff] [blame] | 825 | if (mm->color_adjust) |
| 826 | mm->color_adjust(prev_node, mm->scan_color, |
| 827 | &adj_start, &adj_end); |
| 828 | |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 829 | if (check_free_hole(adj_start, adj_end, |
Daniel Vetter | 7521473 | 2010-08-26 21:44:17 +0200 | [diff] [blame] | 830 | mm->scan_size, mm->scan_alignment)) { |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 831 | mm->scan_hit_start = hole_start; |
Chris Wilson | 901593f | 2012-12-19 16:51:06 +0000 | [diff] [blame] | 832 | mm->scan_hit_end = hole_end; |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 833 | return true; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 834 | } |
| 835 | |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 836 | return false; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 837 | } |
| 838 | EXPORT_SYMBOL(drm_mm_scan_add_block); |
| 839 | |
| 840 | /** |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 841 | * drm_mm_scan_remove_block - remove a node from the scan list |
| 842 | * @node: drm_mm_node to remove |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 843 | * |
| 844 | * Nodes _must_ be removed in the exact same order from the scan list as they |
| 845 | * have been added, otherwise the internal state of the memory manager will be |
| 846 | * corrupted. |
| 847 | * |
| 848 | * When the scan list is empty, the selected memory nodes can be freed. An |
David Herrmann | 31e5d7c | 2013-07-27 13:36:27 +0200 | [diff] [blame] | 849 | * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then |
| 850 | * return the just freed block (because its at the top of the free_stack list). |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 851 | * |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 852 | * Returns: |
| 853 | * True if this block should be evicted, false otherwise. Will always |
| 854 | * return false when no hole has been found. |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 855 | */ |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 856 | bool drm_mm_scan_remove_block(struct drm_mm_node *node) |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 857 | { |
| 858 | struct drm_mm *mm = node->mm; |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 859 | struct drm_mm_node *prev_node; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 860 | |
| 861 | mm->scanned_blocks--; |
| 862 | |
| 863 | BUG_ON(!node->scanned_block); |
| 864 | node->scanned_block = 0; |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 865 | |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 866 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, |
| 867 | node_list); |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 868 | |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 869 | prev_node->hole_follows = node->scanned_preceeds_hole; |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 870 | list_add(&node->node_list, &prev_node->node_list); |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 871 | |
Chris Wilson | 901593f | 2012-12-19 16:51:06 +0000 | [diff] [blame] | 872 | return (drm_mm_hole_node_end(node) > mm->scan_hit_start && |
| 873 | node->start < mm->scan_hit_end); |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 874 | } |
| 875 | EXPORT_SYMBOL(drm_mm_scan_remove_block); |
| 876 | |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 877 | /** |
| 878 | * drm_mm_clean - checks whether an allocator is clean |
| 879 | * @mm: drm_mm allocator to check |
| 880 | * |
| 881 | * Returns: |
| 882 | * True if the allocator is completely free, false if there's still a node |
| 883 | * allocated in it. |
| 884 | */ |
| 885 | bool drm_mm_clean(struct drm_mm * mm) |
Thomas Hellstrom | 1d58420 | 2007-01-08 22:25:47 +1100 | [diff] [blame] | 886 | { |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 887 | struct list_head *head = &mm->head_node.node_list; |
Thomas Hellstrom | 1d58420 | 2007-01-08 22:25:47 +1100 | [diff] [blame] | 888 | |
| 889 | return (head->next->next == head); |
| 890 | } |
Jerome Glisse | 249d604 | 2009-04-08 17:11:16 +0200 | [diff] [blame] | 891 | EXPORT_SYMBOL(drm_mm_clean); |
Thomas Hellstrom | 1d58420 | 2007-01-08 22:25:47 +1100 | [diff] [blame] | 892 | |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 893 | /** |
| 894 | * drm_mm_init - initialize a drm-mm allocator |
| 895 | * @mm: the drm_mm structure to initialize |
| 896 | * @start: start of the range managed by @mm |
| 897 | * @size: end of the range managed by @mm |
| 898 | * |
| 899 | * Note that @mm must be cleared to 0 before calling this function. |
| 900 | */ |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 901 | void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) |
Thomas Hellstrom | 3a1bd92 | 2006-08-07 21:30:28 +1000 | [diff] [blame] | 902 | { |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 903 | INIT_LIST_HEAD(&mm->hole_stack); |
Daniel Vetter | 709ea97 | 2010-07-02 15:02:16 +0100 | [diff] [blame] | 904 | mm->scanned_blocks = 0; |
Thomas Hellstrom | 3a1bd92 | 2006-08-07 21:30:28 +1000 | [diff] [blame] | 905 | |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 906 | /* Clever trick to avoid a special case in the free hole tracking. */ |
| 907 | INIT_LIST_HEAD(&mm->head_node.node_list); |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 908 | mm->head_node.hole_follows = 1; |
| 909 | mm->head_node.scanned_block = 0; |
| 910 | mm->head_node.scanned_prev_free = 0; |
| 911 | mm->head_node.scanned_next_free = 0; |
| 912 | mm->head_node.mm = mm; |
| 913 | mm->head_node.start = start + size; |
| 914 | mm->head_node.size = start - mm->head_node.start; |
| 915 | list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); |
| 916 | |
Chris Wilson | 202b52b | 2016-08-03 16:04:09 +0100 | [diff] [blame] | 917 | mm->interval_tree = RB_ROOT; |
| 918 | |
Chris Wilson | 6b9d89b | 2012-07-10 11:15:23 +0100 | [diff] [blame] | 919 | mm->color_adjust = NULL; |
Thomas Hellstrom | 3a1bd92 | 2006-08-07 21:30:28 +1000 | [diff] [blame] | 920 | } |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 921 | EXPORT_SYMBOL(drm_mm_init); |
Thomas Hellstrom | 3a1bd92 | 2006-08-07 21:30:28 +1000 | [diff] [blame] | 922 | |
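/*
 * Example, as a sketch only: "struct my_gpu" and the 256 MiB aperture are
 * invented. The manager is typically embedded in a driver-private structure
 * and initialized once with the range the hardware exposes; allocating that
 * structure with kzalloc() satisfies the cleared-to-0 requirement above.
 *
 *	struct my_gpu {
 *		struct drm_mm vram_mm;
 *	};
 *
 *	struct my_gpu *gpu = kzalloc(sizeof(*gpu), GFP_KERNEL);
 *
 *	if (!gpu)
 *		return -ENOMEM;
 *	drm_mm_init(&gpu->vram_mm, 0, 256 << 20);
 */
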
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 923 | /** |
| 924 | * drm_mm_takedown - clean up a drm_mm allocator |
| 925 | * @mm: drm_mm allocator to clean up |
| 926 | * |
| 927 | * Note that it is a bug to call this function on an allocator which is not |
| 928 | * clean. |
| 929 | */ |
Chris Wilson | 5705670 | 2016-10-31 09:08:06 +0000 | [diff] [blame^] | 930 | void drm_mm_takedown(struct drm_mm *mm) |
Thomas Hellstrom | 3a1bd92 | 2006-08-07 21:30:28 +1000 | [diff] [blame] | 931 | { |
Chris Wilson | 5705670 | 2016-10-31 09:08:06 +0000 | [diff] [blame^] | 932 | if (WARN(!list_empty(&mm->head_node.node_list), |
| 933 | "Memory manager not clean during takedown.\n")) |
| 934 | show_leaks(mm); |
}
Dave Airlie | f453ba0 | 2008-11-07 14:05:41 -0800 | [diff] [blame] | 937 | EXPORT_SYMBOL(drm_mm_takedown); |
Dave Airlie | fa8a123 | 2009-08-26 13:13:37 +1000 | [diff] [blame] | 938 | |
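/*
 * Note: show_leaks() is only compiled in when CONFIG_DRM_DEBUG_MM is
 * enabled, in which case it prints the backtrace recorded when each
 * still-allocated node was inserted; without that option it is an empty
 * stub and the WARN above is the only diagnostic.
 */
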
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 939 | static u64 drm_mm_debug_hole(struct drm_mm_node *entry, |
| 940 | const char *prefix) |
Daniel Vetter | 2c54b13 | 2013-07-01 22:01:02 +0200 | [diff] [blame] | 941 | { |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 942 | u64 hole_start, hole_end, hole_size; |
Daniel Vetter | 2c54b13 | 2013-07-01 22:01:02 +0200 | [diff] [blame] | 943 | |
| 944 | if (entry->hole_follows) { |
| 945 | hole_start = drm_mm_hole_node_start(entry); |
| 946 | hole_end = drm_mm_hole_node_end(entry); |
| 947 | hole_size = hole_end - hole_start; |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 948 | pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start, |
| 949 | hole_end, hole_size); |
Daniel Vetter | 2c54b13 | 2013-07-01 22:01:02 +0200 | [diff] [blame] | 950 | return hole_size; |
| 951 | } |
| 952 | |
| 953 | return 0; |
| 954 | } |
| 955 | |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 956 | /** |
| 957 | * drm_mm_debug_table - dump allocator state to dmesg |
| 958 | * @mm: drm_mm allocator to dump |
| 959 | * @prefix: prefix to use for dumping to dmesg |
| 960 | */ |
Jerome Glisse | 99d7e48 | 2009-12-09 21:55:09 +0100 | [diff] [blame] | 961 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
| 962 | { |
| 963 | struct drm_mm_node *entry; |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 964 | u64 total_used = 0, total_free = 0, total = 0; |
Jerome Glisse | 99d7e48 | 2009-12-09 21:55:09 +0100 | [diff] [blame] | 965 | |
Daniel Vetter | 2c54b13 | 2013-07-01 22:01:02 +0200 | [diff] [blame] | 966 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 967 | |
| 968 | drm_mm_for_each_node(entry, mm) { |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 969 | pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start, |
| 970 | entry->start + entry->size, entry->size); |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 971 | total_used += entry->size; |
Daniel Vetter | 2c54b13 | 2013-07-01 22:01:02 +0200 | [diff] [blame] | 972 | total_free += drm_mm_debug_hole(entry, prefix); |
Jerome Glisse | 99d7e48 | 2009-12-09 21:55:09 +0100 | [diff] [blame] | 973 | } |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 974 | total = total_free + total_used; |
| 975 | |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 976 | pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total, |
| 977 | total_used, total_free); |
Jerome Glisse | 99d7e48 | 2009-12-09 21:55:09 +0100 | [diff] [blame] | 978 | } |
| 979 | EXPORT_SYMBOL(drm_mm_debug_table); |
| 980 | |
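/*
 * Example call (the "vram" prefix and the gpu pointer are arbitrary driver
 * state): the output goes through pr_debug(), so it only shows up once the
 * callsites here are enabled via dynamic debug, or the file is built with
 * DEBUG defined.
 *
 *	drm_mm_debug_table(&gpu->vram_mm, "vram");
 */
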
Dave Airlie | fa8a123 | 2009-08-26 13:13:37 +1000 | [diff] [blame] | 981 | #if defined(CONFIG_DEBUG_FS) |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 982 | static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) |
Daniel Vetter | 3a359f0 | 2013-04-20 12:08:11 +0200 | [diff] [blame] | 983 | { |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 984 | u64 hole_start, hole_end, hole_size; |
Daniel Vetter | 3a359f0 | 2013-04-20 12:08:11 +0200 | [diff] [blame] | 985 | |
| 986 | if (entry->hole_follows) { |
| 987 | hole_start = drm_mm_hole_node_start(entry); |
| 988 | hole_end = drm_mm_hole_node_end(entry); |
| 989 | hole_size = hole_end - hole_start; |
Russell King | 2f15791 | 2015-05-28 10:36:27 +0100 | [diff] [blame] | 990 | seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 991 | hole_end, hole_size); |
Daniel Vetter | 3a359f0 | 2013-04-20 12:08:11 +0200 | [diff] [blame] | 992 | return hole_size; |
| 993 | } |
| 994 | |
| 995 | return 0; |
| 996 | } |
| 997 | |
Daniel Vetter | e18c041 | 2014-01-23 00:39:13 +0100 | [diff] [blame] | 998 | /** |
| 999 | * drm_mm_dump_table - dump allocator state to a seq_file |
| 1000 | * @m: seq_file to dump to |
| 1001 | * @mm: drm_mm allocator to dump |
| 1002 | */ |
Dave Airlie | fa8a123 | 2009-08-26 13:13:37 +1000 | [diff] [blame] | 1003 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
| 1004 | { |
| 1005 | struct drm_mm_node *entry; |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 1006 | u64 total_used = 0, total_free = 0, total = 0; |
Dave Airlie | fa8a123 | 2009-08-26 13:13:37 +1000 | [diff] [blame] | 1007 | |
Daniel Vetter | 3a359f0 | 2013-04-20 12:08:11 +0200 | [diff] [blame] | 1008 | total_free += drm_mm_dump_hole(m, &mm->head_node); |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 1009 | |
| 1010 | drm_mm_for_each_node(entry, mm) { |
Russell King | 2f15791 | 2015-05-28 10:36:27 +0100 | [diff] [blame] | 1011 | seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 1012 | entry->start + entry->size, entry->size); |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 1013 | total_used += entry->size; |
Daniel Vetter | 3a359f0 | 2013-04-20 12:08:11 +0200 | [diff] [blame] | 1014 | total_free += drm_mm_dump_hole(m, entry); |
Dave Airlie | fa8a123 | 2009-08-26 13:13:37 +1000 | [diff] [blame] | 1015 | } |
Daniel Vetter | ea7b1dd | 2011-02-18 17:59:12 +0100 | [diff] [blame] | 1016 | total = total_free + total_used; |
| 1017 | |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 1018 | seq_printf(m, "total: %llu, used %llu free %llu\n", total, |
| 1019 | total_used, total_free); |
Dave Airlie | fa8a123 | 2009-08-26 13:13:37 +1000 | [diff] [blame] | 1020 | return 0; |
| 1021 | } |
| 1022 | EXPORT_SYMBOL(drm_mm_dump_table); |
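
/*
 * Usage sketch (driver names invented): this is usually exposed through
 * debugfs by wiring drm_mm_dump_table() into a seq_file show callback, for
 * instance one registered with drm_debugfs_create_files():
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_info_node *node = m->private;
 *		struct my_gpu *gpu = node->minor->dev->dev_private;
 *
 *		return drm_mm_dump_table(m, &gpu->vram_mm);
 *	}
 */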
| 1023 | #endif |