/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging.
 */
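
/*
 * A minimal usage sketch (illustrative only; my_bo, my_mm and my_bo_pin are
 * assumptions of this example, not core API): a driver embeds a
 * struct drm_mm_node in its buffer object and asks the allocator for space.
 *
 *	struct my_bo {
 *		struct drm_mm_node vram_node;	// embedded, no extra allocation
 *	};
 *
 *	static int my_bo_pin(struct drm_mm *my_mm, struct my_bo *bo,
 *			     unsigned long size)
 *	{
 *		// the node must be zeroed before insertion
 *		memset(&bo->vram_node, 0, sizeof(bo->vram_node));
 *		return drm_mm_insert_node_generic(my_mm, &bo->vram_node,
 *						  size, 0, 0,
 *						  DRM_MM_SEARCH_DEFAULT);
 *	}
 *
 * On success vram_node.start holds the assigned offset; -ENOSPC means no
 * hole was large enough.
 */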

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       unsigned long size,
							       unsigned alignment,
							       unsigned long color,
							       unsigned long start,
							       unsigned long end,
							       enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * Insert a node whose start and size have already been set up by the caller,
 * provided that address range is still free. This is how drivers take over
 * ranges the firmware has already handed out (see the overview above).
 * Returns -ENOSPC if the hole covering the requested range no longer exists.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
	     node->start, node->size);
	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
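
/*
 * A reservation sketch (illustrative; firmware_fb_base/size and my_mm are
 * assumptions of this example): taking over the firmware-configured scanout
 * buffer at driver load. The driver fills in start and size itself instead
 * of letting the allocator pick a hole.
 *
 *	struct drm_mm_node fb_node;
 *	int err;
 *
 *	memset(&fb_node, 0, sizeof(fb_node));
 *	fb_node.start = firmware_fb_base;
 *	fb_node.size = firmware_fb_size;
 *	err = drm_mm_reserve_node(&my_mm, &fb_node);
 *	// -ENOSPC here means the range was already claimed; fall back to
 *	// a fresh allocation with drm_mm_insert_node_generic()
 */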
Chris Wilson5973c7e2012-11-15 11:32:16 +0000186
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100187/**
188 * Search for free space and insert a preallocated memory node. Returns
189 * -ENOSPC if no suitable free area is available. The preallocated memory node
190 * must be cleared.
191 */
Chris Wilsonb8103452012-12-07 20:37:06 +0000192int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
193 unsigned long size, unsigned alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +0200194 unsigned long color,
195 enum drm_mm_search_flags flags)
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100196{
197 struct drm_mm_node *hole_node;
198
Chris Wilsonb8103452012-12-07 20:37:06 +0000199 hole_node = drm_mm_search_free_generic(mm, size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +0200200 color, flags);
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100201 if (!hole_node)
202 return -ENOSPC;
203
Chris Wilsonb8103452012-12-07 20:37:06 +0000204 drm_mm_insert_helper(hole_node, node, size, alignment, color);
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100205 return 0;
206}
Chris Wilsonb8103452012-12-07 20:37:06 +0000207EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags flags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, flags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
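
/*
 * A range-restricted sketch (illustrative assumption: hardware that can only
 * scan out of the first 256 MiB of its address space). Only the window
 * [0, 256 MiB) is considered, with page alignment:
 *
 *	err = drm_mm_insert_node_in_range_generic(&my_mm, &bo->vram_node,
 *						  size, PAGE_SIZE, 0,
 *						  0, 256 << 20,
 *						  DRM_MM_SEARCH_BEST);
 *
 * DRM_MM_SEARCH_BEST returns the smallest hole that fits rather than the
 * most recently freed one, trading a full walk of the hole stack for less
 * fragmentation.
 */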

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
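
/*
 * The counterpart to the insertion sketches above (illustrative): on unpin
 * the driver removes its embedded node, which immediately turns the range
 * into a hole on top of the free stack.
 *
 *	static void my_bo_unpin(struct my_bo *bo)
 *	{
 *		drm_mm_remove_node(&bo->vram_node);
 *	}
 */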

/*
 * Does the hole [start, end) have room for @size bytes once start has been
 * rounded up to the requested alignment?
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       unsigned long size,
							       unsigned alignment,
							       unsigned long color,
							       unsigned long start,
							       unsigned long end,
							       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
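
/*
 * A sketch of node replacement (illustrative; my_bo and my_bo_create are
 * assumptions of this example): handing an allocation over from a temporary
 * node to one embedded in a freshly created object, without freeing and
 * re-allocating the underlying range.
 *
 *	struct drm_mm_node tmp;			// already allocated via drm_mm
 *	struct my_bo *bo = my_bo_create();	// hypothetical constructor
 *
 *	drm_mm_replace_node(&tmp, &bo->vram_node);
 *	// tmp is now unallocated; bo->vram_node owns the range
 */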

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */
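
/*
 * A sketch of the scan roster in use (illustrative, modelled on how i915
 * drives these helpers; my_bo, lru and the list members are assumptions of
 * this example):
 *
 *	struct my_bo *bo, *tmp;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&my_mm, size, alignment, color);
 *	list_for_each_entry(bo, &lru, lru_link) {
 *		// list_add() prepends, so walking evict_list forwards
 *		// later visits nodes in exact reverse add order
 *		list_add(&bo->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&bo->vram_node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// every added block must be removed again, in reverse order
 *	list_for_each_entry_safe(bo, tmp, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&bo->vram_node))
 *			list_del(&bo->evict_link);	// keep this object
 *	}
 *
 *	// if found, evict what is left on evict_list; the freed hole then
 *	// sits on top of the free stack for the following search
 */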

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
		       prefix, hole_start, hole_end,
		       hole_size);
		return hole_size;
	}

	return 0;
}

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
			   hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			   entry->start, entry->start + entry->size,
			   entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
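
/*
 * A debugfs hookup sketch (illustrative; my_mm_show and my_mm are assumptions
 * of this example). Wired up as a seq_file show callback, this prints one
 * line per node and per hole plus a total/used/free summary:
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		return drm_mm_dump_table(m, &my_mm);
 *	}
 */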
#endif