/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
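
/*
 * Rough usage sketch (assumed, not mandated by this file): sinit() sets up
 * the first pool, smalloc()/sfree() hand memory out and take it back, and
 * scleanup() releases the pools again:
 *
 *	sinit();
 *	char *msg = smalloc_strdup("shared string");
 *	sfree(msg);
 *	scleanup();
 */
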
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <limits.h>

#include "mutex.h"
#include "arch/arch.h"

#define MP_SAFE			/* define to make thread safe */
#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
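
/*
 * Allocation granularity is one SMALLOC_BPB (32) byte block. Each bit in a
 * bitmap word tracks one block, so on platforms where unsigned int is 32
 * bits a single word covers SMALLOC_BPL = 32 * 32 = 1024 bytes of pool
 * space. A request of N bytes thus consumes (N + 31) / 32 blocks.
 */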

#define INITIAL_SIZE	1024*1024	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* total blocks */
	unsigned int next_non_full;
	int fd;					/* memory backing fd */
	unsigned int mmap_size;
};

struct block_hdr {
	unsigned int size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};
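
/*
 * Each allocation is laid out as: struct block_hdr, then the user data and,
 * with SMALLOC_REDZONE, a trailing unsigned int post redzone. hdr->size is
 * the full size of that layout, not just the user-visible part.
 */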

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_mutex *lock;

static inline void pool_lock(struct pool *pool)
{
	if (pool->lock)
		fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	if (pool->lock)
		fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	if (lock)
		fio_mutex_down_read(lock);
}

static inline void global_read_unlock(void)
{
	if (lock)
		fio_mutex_up_read(lock);
}

static inline void global_write_lock(void)
{
	if (lock)
		fio_mutex_down_write(lock);
}

static inline void global_write_unlock(void)
{
	if (lock)
		fio_mutex_up_write(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline unsigned int size_to_blocks(unsigned int size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

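/*
 * blocks_iter() walks the bitmap words that cover nr_blocks blocks starting
 * at bit idx of word pool_idx, building a mask for the bits that fall into
 * each word and applying func to it. For example, nr_blocks = 3 at idx = 2
 * within one word yields mask = ((1 << 3) - 1) << 2 = 0x1c.
 */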
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

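/*
 * Return the index of the next zero bit strictly after 'start'. ffz() comes
 * from the arch headers; the caller must ensure the word is not all ones.
 */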
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= (start + 1);
	return ffz(word) + start + 1;
}

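/*
 * Set up one pool: back it with an unlinked temp file, mmap() it MAP_SHARED,
 * and place the free/busy bitmap directly behind the data blocks inside the
 * same mapping (at map + nr_blocks * SMALLOC_BPL).
 */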
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int fd, bitmap_blocks;
	char file[] = "/tmp/.fio_smalloc.XXXXXX";
	void *ptr;

	fd = mkstemp(file);
	if (fd < 0)
		goto out_close;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	if (ftruncate(fd, alloc_size) < 0)
		goto out_unlink;

	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)
		goto out_unlink;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

#ifdef MP_SAFE
	pool->lock = fio_mutex_init(1);
	if (!pool->lock)
		goto out_unlink;
#endif

	/*
	 * Unlink the pool file now. It won't actually be deleted until the
	 * fd is closed, which happens both on cleanup and on an unexpected
	 * quit. This way we don't leave temp files around in case of a crash.
	 */
	unlink(file);
	pool->fd = fd;

	nr_pools++;
	return 0;
out_unlink:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	unlink(file);
out_close:
	close(fd);
	return 1;
}

void sinit(void)
{
	int ret;

#ifdef MP_SAFE
	lock = fio_mutex_rw_init();
#endif
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, since it was already unlinked.
	 */
	close(pool->fd);
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_mutex_remove(lock);
}

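/*
 * Redzone checking: with SMALLOC_REDZONE the header carries a known pattern
 * in front of the user data and another one is written in the last word of
 * the allocation. Both are verified when the block is freed, so an overrun
 * or underrun of the user area triggers an assert.
 */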
#ifdef SMALLOC_REDZONE
static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, " ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, " ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

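/*
 * Freeing goes back from the user pointer to the block header, then to the
 * bitmap position: with SMALLOC_BPL = 1024 and SMALLOC_BPB = 32, an offset
 * of 2080 bytes into the pool maps to word i = 2080 / 1024 = 2 and bit
 * idx = (2080 % 1024) / 32 = 1.
 */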
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

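/*
 * sfree() only knows the pointer, so it finds the owning pool by address
 * range under the global read lock and then lets sfree_pool() do the work.
 */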
void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}

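/*
 * First-fit scan of one pool: start at the first bitmap word that may have
 * room (next_non_full), skip words that are completely used, locate a zero
 * bit with find_next_zero(), and check with blocks_free() that the whole
 * run of nr_blocks blocks is available. On a miss the search resumes just
 * past the failed run; on a hit the blocks are marked busy and the byte
 * offset into the pool is returned.
 */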
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}

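/*
 * Wrap __smalloc_pool(): grow the request by the block header (and redzone
 * word, when enabled), stamp the header, fill the redzone and hand back a
 * zeroed pointer to the area behind the header.
 */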
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

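/*
 * Try the pool that satisfied the last allocation first, then the rest. If
 * every existing pool is full, add a new pool large enough for the request,
 * up to MAX_POOLS pools. Returns NULL if the pool limit is hit or a new
 * pool cannot be set up.
 */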
void *smalloc(unsigned int size)
{
	unsigned int i;

	global_write_lock();
	i = last_pool;

	do {
		for (; i < nr_pools; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			last_pool = 0;
			continue;
		}

		if (nr_pools + 1 > MAX_POOLS)
			break;
		else {
			i = nr_pools;
			if (add_pool(&mp[nr_pools], size))
				goto out;
		}
	} while (1);

out:
	global_write_unlock();
	return NULL;
}

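/*
 * Duplicate a string into shared memory. Note that the copy assumes the
 * smalloc() call succeeded; a NULL return would crash the strcpy() below.
 */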
char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	strcpy(ptr, str);
	return ptr;
}