/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
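
/*
 * Usage sketch (illustrative, not part of this file): callers that must
 * allocate from atomic context top up the per-manager node cache from
 * process context first, then allocate with atomic == 1 so a failed
 * GFP_ATOMIC kzalloc can fall back to the cache. my_mm and my_lock are
 * hypothetical names.
 *
 *	if (drm_mm_pre_get(&my_mm))		// may sleep
 *		return -ENOMEM;
 *	...
 *	spin_lock(&my_lock);			// atomic context from here on
 *	hole = drm_mm_search_free(&my_mm, size, alignment, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, alignment, 1);
 *	spin_unlock(&my_lock);
 */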

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
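
/*
 * Illustrative sketch (not part of this file): with drm_mm_insert_node() the
 * caller embeds the struct drm_mm_node in its own object, so no separate
 * allocation is needed. struct my_buffer and my_buffer_bind() are
 * hypothetical names.
 *
 *	struct my_buffer {
 *		struct drm_mm_node mm_node;	// must be zeroed before use
 *		void *cpu_addr;
 *	};
 *
 *	static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
 *				  unsigned long size, unsigned alignment)
 *	{
 *		return drm_mm_insert_node(mm, &buf->mm_node, size, alignment);
 *	}
 */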

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
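
/*
 * Illustrative sketch (not part of this file): range-restricted insertion is
 * typically used to keep an allocation inside a window such as a CPU-mappable
 * aperture. The 256 MiB bound below is a made-up example value.
 *
 *	ret = drm_mm_insert_node_in_range(mm, &buf->mm_node, size, alignment,
 *					  0, 256 * 1024 * 1024);
 *	if (ret == -ENOSPC)
 *		// evict something from the range and retry
 */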

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
		       == drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
		       != drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
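
/*
 * Usage sketch (illustrative, not part of this file): the classic
 * non-embedded API pairs drm_mm_search_free()/drm_mm_get_block_generic()
 * with a single drm_mm_put_block() on release; the allocator recycles the
 * node through the unused_nodes cache when possible. my_mm is hypothetical.
 *
 *	hole = drm_mm_search_free(&my_mm, size, alignment, 1);	// best fit
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0);
 *	...
 *	drm_mm_put_block(node);		// removes and recycles/frees the node
 */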

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
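
/*
 * Illustrative sketch (not part of this file): drm_mm_replace_node() can hand
 * an allocation over from a temporary node to a long-lived embedded one
 * without going through remove + re-insert. Names are hypothetical.
 *
 *	struct drm_mm_node tmp = {};	// filled by drm_mm_insert_node()
 *	...
 *	drm_mm_replace_node(&tmp, &buf->mm_node);	// tmp is now unused
 */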

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in the exact reverse order in
 * which they were added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start & size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
		    <= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
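
/*
 * Usage sketch (illustrative, not part of this file): the scan API is a
 * roll-back protocol. Add LRU candidates until a hole is reported, then
 * remove every added block again in reverse order; only blocks for which
 * drm_mm_scan_remove_block() returns 1 actually need to be evicted.
 * my_lru, lru_link, scan_link and my_unbind() are hypothetical names.
 *
 *	LIST_HEAD(scanned);
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &my_lru, lru_link) {
 *		list_add(&obj->scan_link, &scanned);	// head-insert: LIFO
 *		if (drm_mm_scan_add_block(&obj->mm_node))
 *			break;				// hole found
 *	}
 *
 *	// a forward walk over 'scanned' is the reverse order of addition
 *	list_for_each_entry_safe(obj, tmp, &scanned, scan_link) {
 *		int evict = drm_mm_scan_remove_block(&obj->mm_node);
 *		list_del(&obj->scan_link);
 *		if (evict)
 *			my_unbind(obj);	// frees obj->mm_node's space
 *	}
 */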

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
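
/*
 * Usage sketch (illustrative, not part of this file): a manager covering a
 * hypothetical 16 MiB VRAM region. The start/size units (bytes, pages, ...)
 * are up to the caller, as long as they are used consistently.
 *
 *	struct drm_mm vram_mm;
 *
 *	drm_mm_init(&vram_mm, 0, 16 * 1024 * 1024);
 *	...
 *	// all nodes must have been removed before takedown
 *	drm_mm_takedown(&vram_mm);
 */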

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
		       prefix, hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			       prefix, hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
			   hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			   entry->start, entry->start + entry->size,
			   entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				   hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
		   total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif