blob: 959186cbf3280b90b911ff54373531ee577eef97 [file] [log] [blame]
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +10001/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28
29/*
30 * Generic simple memory manager implementation. Intended to be used as a base
31 * class implementation for more advanced memory managers.
32 *
33 * Note that the algorithm used is quite simple and there might be substantial
34 * performance gains if a smarter free list is implemented. Currently it is just an
35 * unordered stack of free regions. This could easily be improved if an RB-tree
36 * is used instead. At least if we expect heavy fragmentation.
37 *
38 * Aligned allocations can also see improvement.
39 *
40 * Authors:
Jan Engelhardt96de0e22007-10-19 23:21:04 +020041 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +100042 */
43
44#include "drmP.h"
Jerome Glisse249d6042009-04-08 17:11:16 +020045#include "drm_mm.h"
Thomas Hellstrom1d584202007-01-08 22:25:47 +110046#include <linux/slab.h>
Dave Airliefa8a1232009-08-26 13:13:37 +100047#include <linux/seq_file.h>
Thomas Hellstrom1d584202007-01-08 22:25:47 +110048
Jerome Glisse249d6042009-04-08 17:11:16 +020049#define MM_UNUSED_TARGET 4
50
Jerome Glisse249d6042009-04-08 17:11:16 +020051static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
Thomas Hellstrom1d584202007-01-08 22:25:47 +110052{
Dave Airlie55910512007-07-11 16:53:40 +100053 struct drm_mm_node *child;
Thomas Hellstrom1d584202007-01-08 22:25:47 +110054
Jerome Glisse249d6042009-04-08 17:11:16 +020055 if (atomic)
Daniel Vetter709ea972010-07-02 15:02:16 +010056 child = kzalloc(sizeof(*child), GFP_ATOMIC);
Jerome Glisse249d6042009-04-08 17:11:16 +020057 else
Daniel Vetter709ea972010-07-02 15:02:16 +010058 child = kzalloc(sizeof(*child), GFP_KERNEL);
Jerome Glisse249d6042009-04-08 17:11:16 +020059
60 if (unlikely(child == NULL)) {
61 spin_lock(&mm->unused_lock);
62 if (list_empty(&mm->unused_nodes))
63 child = NULL;
64 else {
65 child =
66 list_entry(mm->unused_nodes.next,
Daniel Vetterea7b1dd2011-02-18 17:59:12 +010067 struct drm_mm_node, node_list);
68 list_del(&child->node_list);
Jerome Glisse249d6042009-04-08 17:11:16 +020069 --mm->num_unused;
70 }
71 spin_unlock(&mm->unused_lock);
72 }
73 return child;
74}
75
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Tops the mm->unused_nodes pool up to MM_UNUSED_TARGET entries so that
 * later atomic allocations (drm_mm_kmalloc() with atomic set) can fall
 * back to the pool instead of relying on GFP_ATOMIC succeeding.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		/* Drop the spinlock around the sleeping GFP_KERNEL
		 * allocation; num_unused is re-checked after reacquiring. */
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			/* A couple of spare nodes is still considered
			 * good enough; only fail when nearly empty. */
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
103
/* The hole (if any) owned by a node starts right behind its allocation. */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}
108
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100109static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100110{
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100111 struct drm_mm_node *next_node =
112 list_entry(hole_node->node_list.next, struct drm_mm_node,
113 node_list);
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100114
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100115 return next_node->start;
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100116}
117
/*
 * Carve @size bytes (with optional @alignment) out of the hole that
 * follows @hole_node and set up @node to track the allocation.
 *
 * Hole bookkeeping is updated in place: if the allocation starts at the
 * very beginning of the hole, @hole_node's hole is consumed; any
 * alignment gap stays behind as @hole_node's (smaller) hole, and any
 * leftover tail becomes the hole following @node.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	/* Caller must pass a node that actually has a hole behind it and a
	 * @node that is not already tracking an allocation. */
	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		/* Allocation starts at hole_start: the old hole is gone. */
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		/* Alignment gap remains as the hole behind @hole_node. */
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	/* drm_mm_search_free() must only have returned holes that fit. */
	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		/* Leftover tail becomes the hole following @node. */
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
155
156struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
157 unsigned long size,
158 unsigned alignment,
159 int atomic)
160{
161 struct drm_mm_node *node;
162
Daniel Vetter9fc935d2011-02-18 17:59:13 +0100163 node = drm_mm_kmalloc(hole_node->mm, atomic);
164 if (unlikely(node == NULL))
165 return NULL;
166
167 drm_mm_insert_helper(hole_node, node, size, alignment);
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100168
Chris Wilsone6c03c52009-05-22 14:14:22 +0100169 return node;
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000170}
Thomas Hellstrom89579f72009-06-17 12:29:56 +0200171EXPORT_SYMBOL(drm_mm_get_block_generic);
Jerome Glisse249d6042009-04-08 17:11:16 +0200172
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100173/**
174 * Search for free space and insert a preallocated memory node. Returns
175 * -ENOSPC if no suitable free area is available. The preallocated memory node
176 * must be cleared.
177 */
178int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
179 unsigned long size, unsigned alignment)
180{
181 struct drm_mm_node *hole_node;
182
183 hole_node = drm_mm_search_free(mm, size, alignment, 0);
184 if (!hole_node)
185 return -ENOSPC;
186
187 drm_mm_insert_helper(hole_node, node, size, alignment);
188
189 return 0;
190}
191EXPORT_SYMBOL(drm_mm_insert_node);
192
/*
 * Range-restricted variant of drm_mm_insert_helper(): carve @size bytes
 * out of the hole following @hole_node, using only the part of the hole
 * that overlaps [start, end).
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	/* Caller must pass a node that actually has a hole behind it and a
	 * @node that is not already tracking an allocation. */
	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Skip the part of the hole that lies below the allowed range. */
	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		/* Allocation starts at hole_start: the old hole is gone. */
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	/* The search must only have returned holes that fit within range. */
	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		/* Leftover tail becomes the hole following @node. */
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
236
237struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
238 unsigned long size,
239 unsigned alignment,
240 unsigned long start,
241 unsigned long end,
242 int atomic)
243{
244 struct drm_mm_node *node;
245
Daniel Vetter9fc935d2011-02-18 17:59:13 +0100246 node = drm_mm_kmalloc(hole_node->mm, atomic);
247 if (unlikely(node == NULL))
248 return NULL;
249
250 drm_mm_insert_helper_range(hole_node, node, size, alignment,
251 start, end);
Jerome Glissea2e68e92009-12-07 15:52:56 +0100252
Jerome Glissea2e68e92009-12-07 15:52:56 +0100253 return node;
254}
255EXPORT_SYMBOL(drm_mm_get_block_range_generic);
256
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100257/**
258 * Search for free space and insert a preallocated memory node. Returns
259 * -ENOSPC if no suitable free area is available. This is for range
260 * restricted allocations. The preallocated memory node must be cleared.
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000261 */
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100262int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
263 unsigned long size, unsigned alignment,
264 unsigned long start, unsigned long end)
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000265{
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100266 struct drm_mm_node *hole_node;
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000267
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100268 hole_node = drm_mm_search_free_in_range(mm, size, alignment,
269 start, end, 0);
270 if (!hole_node)
271 return -ENOSPC;
272
273 drm_mm_insert_helper_range(hole_node, node, size, alignment,
274 start, end);
275
276 return 0;
277}
278EXPORT_SYMBOL(drm_mm_insert_node_in_range);
279
/**
 * Remove a memory node from the allocator.
 *
 * The freed space is merged into the hole behind the previous node:
 * the predecessor either gains a new hole or has its existing hole
 * extended, and is moved to the top of the hole stack either way.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	/* Must not be part of an in-flight eviction scan. */
	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		/* A tracked hole must be non-empty ... */
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		/* ... and an untracked one must be empty. */
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		/* Hole grew: refresh its position on the hole stack. */
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
312
313/*
314 * Remove a memory node from the allocator and free the allocated struct
315 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
316 * drm_mm_get_block functions.
317 */
318void drm_mm_put_block(struct drm_mm_node *node)
319{
320
321 struct drm_mm *mm = node->mm;
322
323 drm_mm_remove_node(node);
324
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100325 spin_lock(&mm->unused_lock);
326 if (mm->num_unused < MM_UNUSED_TARGET) {
327 list_add(&node->node_list, &mm->unused_nodes);
328 ++mm->num_unused;
329 } else
330 kfree(node);
331 spin_unlock(&mm->unused_lock);
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000332}
Eric Anholt673a3942008-07-30 12:06:12 -0700333EXPORT_SYMBOL(drm_mm_put_block);
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000334
/*
 * Does the hole [start, end) have room for @size bytes once @start is
 * rounded up to @alignment (0 = no alignment)?  Returns 1 if it fits,
 * 0 otherwise.
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned rem = start % alignment;
		if (rem)
			wasted = alignment - rem;
	}

	return end >= start + size + wasted;
}
355
Jerome Glisse249d6042009-04-08 17:11:16 +0200356struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
357 unsigned long size,
358 unsigned alignment, int best_match)
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000359{
Dave Airlie55910512007-07-11 16:53:40 +1000360 struct drm_mm_node *entry;
361 struct drm_mm_node *best;
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000362 unsigned long best_size;
363
Daniel Vetter709ea972010-07-02 15:02:16 +0100364 BUG_ON(mm->scanned_blocks);
365
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000366 best = NULL;
367 best_size = ~0UL;
368
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100369 list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
370 BUG_ON(!entry->hole_follows);
371 if (!check_free_hole(drm_mm_hole_node_start(entry),
372 drm_mm_hole_node_end(entry),
Daniel Vetter75214732010-08-26 21:44:17 +0200373 size, alignment))
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100374 continue;
375
Daniel Vetter7a6b2892010-07-02 15:02:15 +0100376 if (!best_match)
377 return entry;
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100378
Daniel Vetter7a6b2892010-07-02 15:02:15 +0100379 if (entry->size < best_size) {
380 best = entry;
381 best_size = entry->size;
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000382 }
383 }
384
385 return best;
386}
Jerome Glisse249d6042009-04-08 17:11:16 +0200387EXPORT_SYMBOL(drm_mm_search_free);
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000388
Jerome Glissea2e68e92009-12-07 15:52:56 +0100389struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
390 unsigned long size,
391 unsigned alignment,
392 unsigned long start,
393 unsigned long end,
394 int best_match)
395{
Jerome Glissea2e68e92009-12-07 15:52:56 +0100396 struct drm_mm_node *entry;
397 struct drm_mm_node *best;
398 unsigned long best_size;
Jerome Glissea2e68e92009-12-07 15:52:56 +0100399
Daniel Vetter709ea972010-07-02 15:02:16 +0100400 BUG_ON(mm->scanned_blocks);
401
Jerome Glissea2e68e92009-12-07 15:52:56 +0100402 best = NULL;
403 best_size = ~0UL;
404
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100405 list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
406 unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
407 start : drm_mm_hole_node_start(entry);
408 unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
409 end : drm_mm_hole_node_end(entry);
Jerome Glissea2e68e92009-12-07 15:52:56 +0100410
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100411 BUG_ON(!entry->hole_follows);
Daniel Vetter75214732010-08-26 21:44:17 +0200412 if (!check_free_hole(adj_start, adj_end, size, alignment))
Daniel Vetter7a6b2892010-07-02 15:02:15 +0100413 continue;
Jerome Glissea2e68e92009-12-07 15:52:56 +0100414
Daniel Vetter7a6b2892010-07-02 15:02:15 +0100415 if (!best_match)
416 return entry;
Jerome Glissea2e68e92009-12-07 15:52:56 +0100417
Daniel Vetter7a6b2892010-07-02 15:02:15 +0100418 if (entry->size < best_size) {
419 best = entry;
420 best_size = entry->size;
Jerome Glissea2e68e92009-12-07 15:52:56 +0100421 }
422 }
423
424 return best;
425}
426EXPORT_SYMBOL(drm_mm_search_free_in_range);
427
Daniel Vetter709ea972010-07-02 15:02:16 +0100428/**
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100429 * Moves an allocation. To be used with embedded struct drm_mm_node.
430 */
431void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
432{
433 list_replace(&old->node_list, &new->node_list);
Daniel Vetter2bbd4492011-05-06 23:47:53 +0200434 list_replace(&old->hole_stack, &new->hole_stack);
Daniel Vetterb0b7af12011-02-18 17:59:14 +0100435 new->hole_follows = old->hole_follows;
436 new->mm = old->mm;
437 new->start = old->start;
438 new->size = old->size;
439
440 old->allocated = 0;
441 new->allocated = 1;
442}
443EXPORT_SYMBOL(drm_mm_replace_node);
444
445/**
Daniel Vetter709ea972010-07-02 15:02:16 +0100446 * Initializa lru scanning.
447 *
448 * This simply sets up the scanning routines with the parameters for the desired
449 * hole.
450 *
451 * Warning: As long as the scan list is non-empty, no other operations than
452 * adding/removing nodes to/from the scan list are allowed.
453 */
454void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
455 unsigned alignment)
456{
457 mm->scan_alignment = alignment;
458 mm->scan_size = size;
459 mm->scanned_blocks = 0;
460 mm->scan_hit_start = 0;
461 mm->scan_hit_size = 0;
Daniel Vetterd935cc62010-09-16 15:13:11 +0200462 mm->scan_check_range = 0;
Daniel Vetterae0cec22011-02-18 17:59:15 +0100463 mm->prev_scanned_node = NULL;
Daniel Vetter709ea972010-07-02 15:02:16 +0100464}
465EXPORT_SYMBOL(drm_mm_init_scan);
466
467/**
Daniel Vetterd935cc62010-09-16 15:13:11 +0200468 * Initializa lru scanning.
469 *
470 * This simply sets up the scanning routines with the parameters for the desired
471 * hole. This version is for range-restricted scans.
472 *
473 * Warning: As long as the scan list is non-empty, no other operations than
474 * adding/removing nodes to/from the scan list are allowed.
475 */
476void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
477 unsigned alignment,
478 unsigned long start,
479 unsigned long end)
480{
481 mm->scan_alignment = alignment;
482 mm->scan_size = size;
483 mm->scanned_blocks = 0;
484 mm->scan_hit_start = 0;
485 mm->scan_hit_size = 0;
486 mm->scan_start = start;
487 mm->scan_end = end;
488 mm->scan_check_range = 1;
Daniel Vetterae0cec22011-02-18 17:59:15 +0100489 mm->prev_scanned_node = NULL;
Daniel Vetterd935cc62010-09-16 15:13:11 +0200490}
491EXPORT_SYMBOL(drm_mm_init_scan_with_range);
492
493/**
Daniel Vetter709ea972010-07-02 15:02:16 +0100494 * Add a node to the scan list that might be freed to make space for the desired
495 * hole.
496 *
497 * Returns non-zero, if a hole has been found, zero otherwise.
498 */
499int drm_mm_scan_add_block(struct drm_mm_node *node)
500{
501 struct drm_mm *mm = node->mm;
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100502 struct drm_mm_node *prev_node;
503 unsigned long hole_start, hole_end;
Daniel Vetterd935cc62010-09-16 15:13:11 +0200504 unsigned long adj_start;
505 unsigned long adj_end;
Daniel Vetter709ea972010-07-02 15:02:16 +0100506
507 mm->scanned_blocks++;
508
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100509 BUG_ON(node->scanned_block);
Daniel Vetter709ea972010-07-02 15:02:16 +0100510 node->scanned_block = 1;
Daniel Vetter709ea972010-07-02 15:02:16 +0100511
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100512 prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
513 node_list);
Daniel Vetter709ea972010-07-02 15:02:16 +0100514
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100515 node->scanned_preceeds_hole = prev_node->hole_follows;
516 prev_node->hole_follows = 1;
517 list_del(&node->node_list);
518 node->node_list.prev = &prev_node->node_list;
Daniel Vetterae0cec22011-02-18 17:59:15 +0100519 node->node_list.next = &mm->prev_scanned_node->node_list;
520 mm->prev_scanned_node = node;
Daniel Vetter709ea972010-07-02 15:02:16 +0100521
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100522 hole_start = drm_mm_hole_node_start(prev_node);
523 hole_end = drm_mm_hole_node_end(prev_node);
Daniel Vetterd935cc62010-09-16 15:13:11 +0200524 if (mm->scan_check_range) {
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100525 adj_start = hole_start < mm->scan_start ?
526 mm->scan_start : hole_start;
527 adj_end = hole_end > mm->scan_end ?
528 mm->scan_end : hole_end;
Daniel Vetterd935cc62010-09-16 15:13:11 +0200529 } else {
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100530 adj_start = hole_start;
531 adj_end = hole_end;
Daniel Vetterd935cc62010-09-16 15:13:11 +0200532 }
533
534 if (check_free_hole(adj_start , adj_end,
Daniel Vetter75214732010-08-26 21:44:17 +0200535 mm->scan_size, mm->scan_alignment)) {
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100536 mm->scan_hit_start = hole_start;
537 mm->scan_hit_size = hole_end;
Daniel Vetter709ea972010-07-02 15:02:16 +0100538
539 return 1;
540 }
541
542 return 0;
543}
544EXPORT_SYMBOL(drm_mm_scan_add_block);
545
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because its at the top of the free_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	/* node->node_list.prev still points at the predecessor even though
	 * the node was unlinked by drm_mm_scan_add_block(). */
	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/* Undo the speculative hole merge and relink the node in place. */
	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containement because start&size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
	    		<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
589
/*
 * Report whether the memory manager holds no allocations.
 *
 * NOTE(review): the condition head->next->next == head is also satisfied
 * when exactly one node is still allocated (head -> node -> head).
 * Presumably a leftover from the earlier scheme in which an empty manager
 * always contained a single free-space node — confirm whether plain
 * list_empty(head) is the intended test before relying on this.
 */
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
Thomas Hellstrom1d584202007-01-08 22:25:47 +1100597
/*
 * Initialize a memory manager covering [start, start + size).
 * Returns 0 (cannot fail).
 */
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	/* head_node sits conceptually past the managed range with a size
	 * that wraps around (unsigned arithmetic), so that
	 * drm_mm_hole_node_start(&mm->head_node) == start: the head node's
	 * hole initially covers the whole [start, start + size) range. */
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000621
Dave Airlie55910512007-07-11 16:53:40 +1000622void drm_mm_takedown(struct drm_mm * mm)
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000623{
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100624 struct drm_mm_node *entry, *next;
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000625
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100626 if (!list_empty(&mm->head_node.node_list)) {
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000627 DRM_ERROR("Memory manager not clean. Delaying takedown\n");
628 return;
629 }
630
Jerome Glisse249d6042009-04-08 17:11:16 +0200631 spin_lock(&mm->unused_lock);
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100632 list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
633 list_del(&entry->node_list);
Jerome Glisse249d6042009-04-08 17:11:16 +0200634 kfree(entry);
635 --mm->num_unused;
636 }
637 spin_unlock(&mm->unused_lock);
638
639 BUG_ON(mm->num_unused != 0);
Thomas Hellstrom3a1bd922006-08-07 21:30:28 +1000640}
Dave Airlief453ba02008-11-07 14:05:41 -0800641EXPORT_SYMBOL(drm_mm_takedown);
Dave Airliefa8a1232009-08-26 13:13:37 +1000642
Jerome Glisse99d7e482009-12-09 21:55:09 +0100643void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
644{
645 struct drm_mm_node *entry;
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100646 unsigned long total_used = 0, total_free = 0, total = 0;
647 unsigned long hole_start, hole_end, hole_size;
Jerome Glisse99d7e482009-12-09 21:55:09 +0100648
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100649 hole_start = drm_mm_hole_node_start(&mm->head_node);
650 hole_end = drm_mm_hole_node_end(&mm->head_node);
651 hole_size = hole_end - hole_start;
652 if (hole_size)
653 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
654 prefix, hole_start, hole_end,
655 hole_size);
656 total_free += hole_size;
657
658 drm_mm_for_each_node(entry, mm) {
659 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
Jerome Glisse99d7e482009-12-09 21:55:09 +0100660 prefix, entry->start, entry->start + entry->size,
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100661 entry->size);
662 total_used += entry->size;
663
664 if (entry->hole_follows) {
665 hole_start = drm_mm_hole_node_start(entry);
666 hole_end = drm_mm_hole_node_end(entry);
667 hole_size = hole_end - hole_start;
668 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
669 prefix, hole_start, hole_end,
670 hole_size);
671 total_free += hole_size;
672 }
Jerome Glisse99d7e482009-12-09 21:55:09 +0100673 }
Daniel Vetterea7b1dd2011-02-18 17:59:12 +0100674 total = total_free + total_used;
675
676 printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
Jerome Glisse99d7e482009-12-09 21:55:09 +0100677 total_used, total_free);
678}
679EXPORT_SYMBOL(drm_mm_debug_table);
680
#if defined(CONFIG_DEBUG_FS)
/*
 * Dump the full node/hole layout of @mm into seq_file @m (debugfs),
 * followed by total/used/free summaries.  Always returns 0.
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long bytes_used = 0, bytes_free = 0;
	unsigned long hole_start, hole_end, hole_size;

	/* Free space (if any) in front of the first allocation. */
	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
			   hole_start, hole_end, hole_size);
	bytes_free += hole_size;

	drm_mm_for_each_node(node, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			   node->start, node->start + node->size,
			   node->size);
		bytes_used += node->size;

		if (node->hole_follows) {
			hole_start = drm_mm_hole_node_start(node);
			hole_end = drm_mm_hole_node_end(node);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				   hole_start, hole_end, hole_size);
			bytes_free += hole_size;
		}
	}

	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   bytes_used + bytes_free, bytes_used, bytes_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif