/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; using an RB-tree instead could
 * easily improve this, at least when heavy fragmentation is expected.
 *
 * Aligned allocations could also be sped up.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
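
/*
 * Minimal usage sketch (illustrative only, not part of this file): a driver
 * typically embeds struct drm_mm in a device structure and struct drm_mm_node
 * in its buffer objects, then hands out ranges by inserting pre-cleared
 * nodes. The names my_dev and my_obj below are hypothetical.
 *
 *	drm_mm_init(&my_dev->mm, 0, my_dev->vram_size);
 *
 *	memset(&my_obj->node, 0, sizeof(my_obj->node));
 *	ret = drm_mm_insert_node(&my_dev->mm, &my_obj->node,
 *				 my_obj->size, PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	... my_obj->node.start is now the object's offset ...
 *
 *	drm_mm_remove_node(&my_obj->node);
 *	drm_mm_takedown(&my_dev->mm);
 */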

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structure
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

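/*
 * Illustrative sketch (hypothetical caller and lock, an assumed usage
 * pattern): since drm_mm_get_block_generic() may have to allocate a struct
 * drm_mm_node, callers that carve out blocks while holding a spinlock can
 * fill the unused-node cache with drm_mm_pre_get() beforehand and then pass
 * atomic = 1.
 *
 *	ret = drm_mm_pre_get(&my_dev->mm);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&my_dev->mm_lock);
 *	hole = drm_mm_search_free_generic(&my_dev->mm, size, 0, 0, false);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
 *	spin_unlock(&my_dev->mm_lock);
 */
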
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
			unsigned long start, unsigned long size)
{
	struct drm_mm_node *hole;
	unsigned long end = start + size;
	unsigned long hole_start;
	unsigned long hole_end;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > start || hole_end < end)
			continue;

		node->start = start;
		node->size = size;
		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_create_block);

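/*
 * Illustrative sketch (hypothetical names): drm_mm_create_block() claims an
 * explicit [start, start + size) range, e.g. an area the firmware already
 * uses, provided a free hole still spans that range.
 *
 *	memset(&my_dev->stolen_node, 0, sizeof(my_dev->stolen_node));
 *	ret = drm_mm_create_block(&my_dev->mm, &my_dev->stolen_node,
 *				  stolen_start, stolen_size);
 *	if (ret == -ENOSPC)
 *		DRM_DEBUG("reserved range overlaps an existing allocation\n");
 */
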
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

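/*
 * Illustrative sketch (hypothetical callback; its signature is inferred from
 * the mm->color_adjust() call sites in this file): the optional color_adjust
 * hook lets a driver shrink each candidate hole, e.g. to keep a guard page
 * next to neighbours of a different color, before the alignment and size
 * checks are applied.
 *
 *	static void my_color_adjust(struct drm_mm_node *node, unsigned long color,
 *				    unsigned long *start, unsigned long *end)
 *	{
 *		if (node->allocated && node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 *
 *	my_dev->mm.color_adjust = my_color_adjust;
 *	ret = drm_mm_insert_node_generic(&my_dev->mm, &my_obj->node,
 *					 size, 0, my_color);
 */
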
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

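/*
 * Illustrative sketch (hypothetical limit): range-restricted insertion is
 * useful when only part of the managed area is acceptable, e.g. when an
 * object must end up in the CPU-mappable portion of the aperture.
 *
 *	ret = drm_mm_insert_node_in_range(&my_dev->mm, &my_obj->node,
 *					  size, 0,
 *					  0, my_dev->mappable_end);
 */
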
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

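/*
 * Illustrative sketch of the eviction-scan protocol (hypothetical helpers and
 * lists, error handling omitted): LRU candidates are added until
 * drm_mm_scan_add_block() reports that a suitable hole can be produced; the
 * scan is then unwound, walking the scan list newest-first the way existing
 * callers do, and only the nodes for which drm_mm_scan_remove_block() returns
 * 1 actually need to be evicted.
 *
 *	drm_mm_init_scan(&my_dev->mm, size, alignment, color);
 *	list_for_each_entry(obj, &my_dev->lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			goto found;
 *	}
 *	... no hole found: run the unwind loop below without collecting
 *	    anything on evict_list and fail ...
 *
 * found:
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_move(&obj->scan_link, &evict_list);
 *		else
 *			list_del(&obj->scan_link);
 *	}
 *	... every node is back in the manager; evict everything on
 *	    evict_list, then retry the allocation ...
 */
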
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif