/*
 * Copyright (c) 2008 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
| 23 | #include <debug.h> |
| 24 | #include <err.h> |
| 25 | #include <list.h> |
| 26 | #include <rand.h> |
| 27 | #include <lib/heap.h> |
| 28 | #include <kernel/thread.h> |
| 29 | |
// round 'a' up to the next multiple of 'b'; 'b' must be a power of 2
#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))

// magic value stamped into every allocation header ('HEAP' multi-character constant)
#define HEAP_MAGIC 'HEAP'

// end of the binary (linker-provided symbol; its address is the heap's start)
extern int _end;

// end of memory (linker-provided symbol; its address is the heap's end)
extern int _end_of_ram;

// Header stored in-place at the start of every free run of memory.
// The free memory itself holds this bookkeeping, so no free run may be
// smaller than this struct (heap_alloc enforces the minimum).
struct free_heap_chunk {
	struct list_node node;	// linkage in theheap.free_list (kept sorted by address)
	size_t len;		// total length of the free run, including this header
};

struct heap {
	void *base;		// lowest address managed by the heap
	size_t len;		// total bytes managed by the heap
	struct list_node free_list;	// address-sorted list of free_heap_chunk
};

// heap static vars
static struct heap theheap;

// structure placed at the beginning every allocation, immediately
// below the pointer handed back to the caller by heap_alloc
struct alloc_struct_begin {
	unsigned int magic;	// must equal HEAP_MAGIC; sanity check in heap_free
	void *ptr;		// base of the chunk this allocation was carved from
	size_t size;		// full length of that chunk (may exceed the requested size)
};
| 60 | |
| 61 | static void dump_free_chunk(struct free_heap_chunk *chunk) |
| 62 | { |
| 63 | dprintf("\t\tbase %p, end 0x%lx, len 0x%lx\n", chunk, (vaddr_t)chunk + chunk->len, chunk->len); |
| 64 | } |
| 65 | |
| 66 | static void heap_dump(void) |
| 67 | { |
| 68 | dprintf("Heap dump:\n"); |
| 69 | dprintf("\tbase %p, len 0x%lx\n", theheap.base, theheap.len); |
| 70 | dprintf("\tfree list:\n"); |
| 71 | |
| 72 | struct free_heap_chunk *chunk; |
| 73 | list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { |
| 74 | dump_free_chunk(chunk); |
| 75 | } |
| 76 | } |
| 77 | |
| 78 | static void heap_test(void) |
| 79 | { |
| 80 | void *ptr[16]; |
| 81 | |
| 82 | ptr[0] = heap_alloc(8, 0); |
| 83 | ptr[1] = heap_alloc(32, 0); |
| 84 | ptr[2] = heap_alloc(7, 0); |
| 85 | ptr[3] = heap_alloc(0, 0); |
| 86 | ptr[4] = heap_alloc(98713, 0); |
| 87 | ptr[5] = heap_alloc(16, 0); |
| 88 | |
| 89 | heap_free(ptr[5]); |
| 90 | heap_free(ptr[1]); |
| 91 | heap_free(ptr[3]); |
| 92 | heap_free(ptr[0]); |
| 93 | heap_free(ptr[4]); |
| 94 | heap_free(ptr[2]); |
| 95 | |
| 96 | heap_dump(); |
| 97 | |
| 98 | int i; |
| 99 | for (i=0; i < 16; i++) |
| 100 | ptr[i] = 0; |
| 101 | |
| 102 | for (i=0; i < 32768; i++) { |
| 103 | unsigned int index = (unsigned int)rand() % 16; |
| 104 | |
| 105 | if ((i % (16*1024)) == 0) |
| 106 | dprintf("pass %d\n", i); |
| 107 | |
| 108 | // dprintf("index 0x%x\n", index); |
| 109 | if (ptr[index]) { |
| 110 | // dprintf("freeing ptr[0x%x] = %p\n", index, ptr[index]); |
| 111 | heap_free(ptr[index]); |
| 112 | ptr[index] = 0; |
| 113 | } |
| 114 | unsigned int align = 1 << ((unsigned int)rand() % 8); |
| 115 | ptr[index] = heap_alloc((unsigned int)rand() % 32768, align); |
| 116 | // dprintf("ptr[0x%x] = %p, align 0x%x\n", index, ptr[index], align); |
| 117 | |
| 118 | DEBUG_ASSERT(((addr_t)ptr[index] % align) == 0); |
| 119 | // heap_dump(); |
| 120 | } |
| 121 | |
| 122 | for (i=0; i < 16; i++) { |
| 123 | if (ptr[i]) |
| 124 | heap_free(ptr[i]); |
| 125 | } |
| 126 | |
| 127 | heap_dump(); |
| 128 | } |
| 129 | |
| 130 | // try to insert this free chunk into the free list, consuming the chunk by merging it with |
| 131 | // nearby ones if possible. Returns base of whatever chunk it became in the list. |
| 132 | static struct free_heap_chunk *heap_insert_free_chunk(struct free_heap_chunk *chunk) |
| 133 | { |
| 134 | #if DEBUG |
| 135 | vaddr_t chunk_end = (vaddr_t)chunk + chunk->len; |
| 136 | #endif |
| 137 | |
| 138 | // dprintf("%s: chunk ptr %p, size 0x%lx, chunk_end 0x%x\n", __FUNCTION__, chunk, chunk->len, chunk_end); |
| 139 | |
| 140 | struct free_heap_chunk *next_chunk; |
| 141 | struct free_heap_chunk *last_chunk; |
| 142 | |
| 143 | // walk through the list, finding the node to insert before |
| 144 | list_for_every_entry(&theheap.free_list, next_chunk, struct free_heap_chunk, node) { |
| 145 | if (chunk < next_chunk) { |
| 146 | DEBUG_ASSERT(chunk_end <= (vaddr_t)next_chunk); |
| 147 | |
| 148 | list_add_before(&next_chunk->node, &chunk->node); |
| 149 | |
| 150 | goto try_merge; |
| 151 | } |
| 152 | } |
| 153 | |
| 154 | // walked off the end of the list, add it at the tail |
| 155 | list_add_tail(&theheap.free_list, &chunk->node); |
| 156 | |
| 157 | // try to merge with the previous chunk |
| 158 | try_merge: |
| 159 | last_chunk = list_prev_type(&theheap.free_list, &chunk->node, struct free_heap_chunk, node); |
| 160 | if (last_chunk) { |
| 161 | if ((vaddr_t)last_chunk + last_chunk->len == (vaddr_t)chunk) { |
| 162 | // easy, just extend the previous chunk |
| 163 | last_chunk->len += chunk->len; |
| 164 | |
| 165 | // remove ourself from the list |
| 166 | list_delete(&chunk->node); |
| 167 | |
| 168 | // set the chunk pointer to the newly extended chunk, in case |
| 169 | // it needs to merge with the next chunk below |
| 170 | chunk = last_chunk; |
| 171 | } |
| 172 | } |
| 173 | |
| 174 | // try to merge with the next chunk |
| 175 | if (next_chunk) { |
| 176 | if ((vaddr_t)chunk + chunk->len == (vaddr_t)next_chunk) { |
| 177 | // extend our chunk |
| 178 | chunk->len += next_chunk->len; |
| 179 | |
| 180 | // remove them from the list |
| 181 | list_delete(&next_chunk->node); |
| 182 | } |
| 183 | } |
| 184 | |
| 185 | return chunk; |
| 186 | } |
| 187 | |
| 188 | struct free_heap_chunk *heap_create_free_chunk(void *ptr, size_t len) |
| 189 | { |
| 190 | DEBUG_ASSERT((len % sizeof(void *)) == 0); // size must be aligned on pointer boundary |
| 191 | |
| 192 | struct free_heap_chunk *chunk = (struct free_heap_chunk *)ptr; |
| 193 | chunk->len = len; |
| 194 | |
| 195 | return chunk; |
| 196 | } |
| 197 | |
// Allocate 'size' bytes from the heap, aligned to 'alignment' bytes when
// alignment > 0 (must be a power of 2; values below 16 are promoted to 16).
// Returns NULL on failure. First-fit search over the address-sorted free list.
void *heap_alloc(size_t size, unsigned int alignment)
{
	void *ptr;

	// dprintf("%s: size %zd, align %d\n", __PRETTY_FUNCTION__, size, alignment);

	// alignment must be power of 2
	if (alignment & (alignment - 1))
		return NULL;

	// we always put a size field + base pointer + magic in front of the allocation
	size += sizeof(struct alloc_struct_begin);

	// make sure we allocate at least the size of a struct free_heap_chunk so that
	// when we free it, we can create a struct free_heap_chunk struct and stick it
	// in the spot
	if (size < sizeof(struct free_heap_chunk))
		size = sizeof(struct free_heap_chunk);

	// round up size to a multiple of native pointer size
	size = ROUNDUP(size, sizeof(void *));

	// deal with nonzero alignments
	if (alignment > 0) {
		if (alignment < 16)
			alignment = 16;

		// add alignment for worst case fit, so the aligned pointer (plus its
		// header) is guaranteed to land inside the chunk
		size += alignment;
	}

	// critical section
	enter_critical_section();

	// walk through the list
	ptr = NULL;
	struct free_heap_chunk *chunk;
	list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
		DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size

		// is it big enough to service our allocation?
		if (chunk->len >= size) {
			ptr = chunk;

			// remove it from the list; capture the successor first so the
			// leftover piece (if any) can be re-inserted in sorted position
			struct list_node *next_node = list_next(&theheap.free_list, &chunk->node);
			list_delete(&chunk->node);

			if (chunk->len > size + sizeof(struct free_heap_chunk)) {
				// there's enough space in this chunk to create a new one after the allocation
				struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size);

				// truncate this chunk (i.e. chunk->len = size)
				chunk->len -= chunk->len - size;

				// add the new one where chunk used to be
				if (next_node)
					list_add_before(next_node, &newchunk->node);
				else
					list_add_tail(&theheap.free_list, &newchunk->node);
			}

			// the allocated size is actually the length of this chunk, not the size requested;
			// if the chunk wasn't split, the caller owns the whole (possibly larger) chunk
			DEBUG_ASSERT(chunk->len >= size);
			size = chunk->len;

			// skip past the header; the caller's pointer starts here
			ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin));

			// align the output if requested; the worst-case padding added to
			// 'size' above guarantees this stays inside the chunk
			if (alignment > 0) {
				ptr = (void *)ROUNDUP((addr_t)ptr, alignment);
			}

			// the header sits immediately below the (possibly aligned) returned
			// pointer; heap_free recovers it the same way
			struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
			as--;
			as->magic = HEAP_MAGIC;
			as->ptr = (void *)chunk;	// chunk base, where the free chunk will be rebuilt
			as->size = size;		// full chunk length, so free returns everything

			break;
		}
	}

	// dprintf("%s: returning ptr %p\n", __PRETTY_FUNCTION__, ptr);

	// heap_dump();

	exit_critical_section();

	return ptr;
}
| 289 | |
| 290 | void heap_free(void *ptr) |
| 291 | { |
| 292 | if (ptr == 0) |
| 293 | return; |
| 294 | |
| 295 | // dprintf("%s: ptr %p\n", __PRETTY_FUNCTION__, ptr); |
| 296 | |
| 297 | // check for the old allocation structure |
| 298 | struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; |
| 299 | as--; |
| 300 | |
| 301 | DEBUG_ASSERT(as->magic == HEAP_MAGIC); |
| 302 | |
| 303 | // dprintf("%s: allocation was %d bytes long at ptr %p\n", __FUNCTION__, as->size, as->ptr); |
| 304 | |
| 305 | // looks good, create a free chunk and add it to the pool |
| 306 | enter_critical_section(); |
| 307 | heap_insert_free_chunk(heap_create_free_chunk(as->ptr, as->size)); |
| 308 | exit_critical_section(); |
| 309 | |
| 310 | // heap_dump(); |
| 311 | } |
| 312 | |
| 313 | void heap_init(void) |
| 314 | { |
| 315 | dprintf("%s: entry\n", __PRETTY_FUNCTION__); |
| 316 | |
| 317 | // set the heap range |
| 318 | theheap.base = (void *)&_end; |
| 319 | theheap.len = ((unsigned) &_end_of_ram) - ((unsigned) &_end); |
| 320 | |
| 321 | dprintf("%s: heap size %ld bytes\n", __PRETTY_FUNCTION__, theheap.len); |
| 322 | |
| 323 | // initialize the free list |
| 324 | list_initialize(&theheap.free_list); |
| 325 | |
| 326 | // create an initial free chunk |
| 327 | heap_insert_free_chunk(heap_create_free_chunk(theheap.base, theheap.len)); |
| 328 | |
| 329 | // dump heap info |
| 330 | // heap_dump(); |
| 331 | |
| 332 | // dprintf("running heap tests\n"); |
| 333 | // heap_test(); |
| 334 | } |
| 335 | |
| 336 | |