/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 * Aligned allocations could also benefit from such a change.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
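
/*
 * Illustrative overview (a sketch, not part of this file's API surface;
 * "my_mm" and the constants are invented). A typical consumer initializes
 * a manager over an address range, carves blocks out of it and returns
 * them when done:
 *
 *	struct drm_mm my_mm;
 *	struct drm_mm_node *hole, *block = NULL;
 *
 *	drm_mm_init(&my_mm, 0, 1024 * 1024);
 *
 *	hole = drm_mm_search_free(&my_mm, 4096, 0, false);
 *	if (hole)
 *		block = drm_mm_get_block(hole, 4096, 0);
 *	...
 *	if (block)
 *		drm_mm_put_block(block);
 *	drm_mm_takedown(&my_mm);
 *
 * drm_mm_search_free() and drm_mm_get_block() are assumed here to be the
 * convenience wrappers from drm_mm.h around the _generic functions below.
 */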

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
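
/*
 * Usage sketch for the pre-allocation pool (illustrative only; the lock
 * name "driver_lock" and the function are invented). Drivers that must
 * carve out a block while holding a spinlock can refill the pool first
 * and then take the atomic path, which falls back to mm->unused_nodes
 * instead of sleeping in kzalloc():
 *
 *	int reserve_region(struct drm_mm *mm, spinlock_t *driver_lock,
 *			   unsigned long size)
 *	{
 *		struct drm_mm_node *hole, *node = NULL;
 *
 *		if (drm_mm_pre_get(mm))
 *			return -ENOMEM;
 *
 *		spin_lock(driver_lock);
 *		hole = drm_mm_search_free(mm, size, 0, false);
 *		if (hole)
 *			node = drm_mm_get_block_atomic(hole, size, 0);
 *		spin_unlock(driver_lock);
 *
 *		return node ? 0 : -ENOSPC;
 *	}
 *
 * drm_mm_get_block_atomic() is assumed to be the GFP_ATOMIC wrapper from
 * drm_mm.h.
 */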

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, false);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, 0);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
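
/*
 * Sketch of the embedded-node style this function enables (the "my_buffer"
 * structure and helper are invented for the example). The node lives inside
 * the driver object and must be zeroed before insertion:
 *
 *	struct my_buffer {
 *		struct drm_mm_node vram_node;
 *	};
 *
 *	static int my_buffer_pin(struct drm_mm *mm, struct my_buffer *buf,
 *				 unsigned long size)
 *	{
 *		memset(&buf->vram_node, 0, sizeof(buf->vram_node));
 *		return drm_mm_insert_node(mm, &buf->vram_node, size, 0);
 *	}
 */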

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (adj_start < start)
		adj_start = start;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long color,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, false);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
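
/*
 * Range-restricted sketch (values invented, "buf" as in the example above):
 * pinning a 4 KiB-aligned buffer into the first 256 MiB of the managed
 * range, e.g. for hardware that can only scan out of a low aperture:
 *
 *	int err = drm_mm_insert_node_in_range(mm, &buf->vram_node, size,
 *					      4096, 0, 256 * 1024 * 1024);
 *	if (err == -ENOSPC)
 *		return err;	(no suitable hole inside [0, 256M))
 */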

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
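
/*
 * Note the two teardown paths (sketch, identifiers as in the examples
 * above): blocks handed out by the drm_mm_get_block family are owned by
 * the allocator and are returned with
 *
 *	drm_mm_put_block(block);
 *
 * while embedded nodes inserted via drm_mm_insert_node() stay owned by
 * the caller and are only unlinked:
 *
 *	drm_mm_remove_node(&buf->vram_node);
 */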

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
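
/*
 * Sketch (invented identifiers): drm_mm_replace_node() lets a driver hand
 * an allocation over from one node structure to another, e.g. from a node
 * returned by drm_mm_get_block() to one embedded in a driver object,
 * without a free/re-allocate cycle that could lose the range:
 *
 *	drm_mm_replace_node(tmp_node, &buf->vram_node);
 *	kfree(tmp_node);	(tmp_node is now unlinked and unused;
 *				 assumes it was heap-allocated)
 */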

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
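
/*
 * Eviction sketch, part one (the "lru" list, "evict_list" and the
 * embedding struct are invented): candidate blocks are added to the scan
 * one after another until a large enough hole is found. list_add()
 * prepends, so walking evict_list later yields the required reverse
 * order:
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *	list_for_each_entry(buf, &lru, lru_link) {
 *		list_add(&buf->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&buf->vram_node))
 *			break;	(hole found)
 *	}
 *
 * Part two, the mandatory unwind, is sketched after
 * drm_mm_scan_remove_block() below.
 */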

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);

	adj_start = hole_start;
	adj_end = hole_end;

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in the reverse order in which
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it is at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start&size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
		    <= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
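
/*
 * Eviction sketch, part two (continuing the invented identifiers from the
 * sketch after drm_mm_init_scan() above): every scanned block must leave
 * the scan in reverse order of addition; the ones for which
 * drm_mm_scan_remove_block() returns 1 overlap the found hole and are the
 * ones to actually evict:
 *
 *	list_for_each_entry_safe(buf, next, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&buf->vram_node))
 *			list_move(&buf->evict_link, &to_evict);
 *		else
 *			list_del(&buf->evict_link);
 *	}
 *	list_for_each_entry_safe(buf, next, &to_evict, evict_link)
 *		my_evict(buf);	(frees the node, creating the hole)
 */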

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
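
/*
 * Worked example of the sentinel arithmetic above (the unsigned wrap-around
 * is intentional), assuming a call drm_mm_init(mm, 0x1000, 0x9000):
 *
 *	head_node.start = 0x1000 + 0x9000        = 0xa000
 *	head_node.size  = 0x1000 - 0xa000        = -0x9000 (mod 2^BITS)
 *	hole start      = head_node.start + size = 0x1000
 *	hole end        = next node's start, i.e. 0xa000 while empty
 *
 * so the empty manager presents exactly [0x1000, 0xa000) as a single hole
 * without any special casing in the hole helpers.
 */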

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
		   total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif