/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <limits.h>
#include <fcntl.h>

#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
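/*
 * With 4-byte ints this works out to SMALLOC_BPI = 32 bits per bitmap
 * word and SMALLOC_BPL = 32 * 32 = 1024 bytes of pool data covered by
 * each bitmap word; memory is handed out in 32-byte blocks.
 */
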
#define INITIAL_SIZE	8192*1024	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
const int int_mask = sizeof(int) - 1;

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* total blocks */
	unsigned int next_non_full;
	unsigned int mmap_size;
};

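/*
 * Each allocation is preceded by a block_hdr recording the rounded-up
 * allocation size. With SMALLOC_REDZONE enabled it also holds the
 * pre-redzone word; the post-redzone word is stored in the last int of
 * the allocation itself (see postred_ptr()).
 */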
struct block_hdr {
	unsigned int size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_mutex *lock;

static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	fio_mutex_down_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_mutex_up_read(lock);
}

static inline void global_write_lock(void)
{
	fio_mutex_down_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_mutex_up_write(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline unsigned int size_to_blocks(unsigned int size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

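/*
 * Walk 'nr_blocks' blocks starting at bit 'idx' of bitmap word
 * 'pool_idx', applying 'func' to the mask covering the blocks that fall
 * into each word. Returns 0 if 'func' fails or the range runs past the
 * end of the pool, 1 once all blocks have been processed.
 */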
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= (start + 1);
	return ffz(word) + start + 1;
}

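/*
 * A pool is a single shared, anonymous mapping: the block data comes
 * first (nr_blocks * SMALLOC_BPL bytes), immediately followed by the
 * bitmap words. Using MAP_SHARED | OS_MAP_ANON means fork()ed workers
 * see the same memory without needing any file backing.
 */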
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE,
			MAP_SHARED | OS_MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED)
		goto out_fail;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(1);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}

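/*
 * Typical lifecycle (sketch):
 *
 *	sinit();			initialize the first pool
 *	p = smalloc(size);		allocate from shared memory
 *	s = smalloc_strdup("name");	duplicate a string into it
 *	sfree(p); sfree(s);
 *	scleanup();			tear all pools down at exit
 */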
void sinit(void)
{
	int ret;

	lock = fio_mutex_rw_init();
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * The pool is backed by an anonymous shared mapping these days,
	 * so unmapping it releases the memory; there is no longer a
	 * temporary backing file to clean up.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_mutex_remove(lock);
}

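/*
 * Redzone checking: fill_redzone() writes a known pattern just before
 * the user data (hdr->prered) and into the last int of the block, and
 * sfree_check_redzone() verifies both patterns on free to catch buffer
 * underruns and overruns.
 */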
#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	unsigned long ptr;

	ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

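/*
 * Freeing maps the pointer back to its bitmap position from its byte
 * offset into the pool: word i = offset / SMALLOC_BPL, bit
 * idx = (offset % SMALLOC_BPL) / SMALLOC_BPB. For example, with the
 * default 32-byte blocks and 32-bit bitmap words, an offset of 5000
 * bytes lands in bitmap word 4, bit 28.
 */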
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

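/*
 * sfree() takes the global lock in read mode only to find the owning
 * pool; adding pools in smalloc() happens under the write lock, and
 * pool entries in mp[] never move, so the per-pool lock is enough for
 * the actual free.
 */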
void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}

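/*
 * First-fit scan under the pool lock: start at the cached next_non_full
 * word, locate a zero bit with find_next_zero(), then check with
 * blocks_free() that the required run of blocks (possibly spanning
 * several bitmap words) is entirely free before claiming it with
 * set_blocks().
 */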
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}

static void *smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

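/*
 * smalloc() scans pools starting from the last one that satisfied an
 * allocation, wraps around to the start once, and finally tries to add
 * a new pool (up to MAX_POOLS). The whole operation runs under the
 * global write lock.
 */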
void *smalloc(unsigned int size)
{
	unsigned int i;

	global_write_lock();
	i = last_pool;

	do {
		for (; i < nr_pools; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			last_pool = 0;
			continue;
		}

		if (nr_pools + 1 > MAX_POOLS)
			break;
		else {
			i = nr_pools;
			if (add_pool(&mp[nr_pools], size))
				goto out;
		}
	} while (1);

out:
	global_write_unlock();
	return NULL;
}

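/*
 * smalloc_strdup() copies the string into shared memory, so the
 * duplicate remains visible to all processes sharing the pools.
 */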
char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}