#ifndef __PERCPU_IDA_H__
#define __PERCPU_IDA_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/spinlock_types.h>
#include <linux/wait.h>
#include <linux/cpumask.h>

struct percpu_ida_cpu;

struct percpu_ida {
	/*
	 * number of tags available to be allocated, as passed to
	 * percpu_ida_init()
	 */
	unsigned			nr_tags;

	struct percpu_ida_cpu __percpu	*tag_cpu;

	/*
	 * Bitmap of cpus that (may) have tags on their percpu freelists:
	 * steal_tags() uses this to decide when to steal tags, and which cpus
	 * to try stealing from.
	 *
	 * It's ok for a freelist to be empty when its bit is set - steal_tags()
	 * will just keep looking - but the bitmap _must_ be set whenever a
	 * percpu freelist does have tags.
	 */
	cpumask_t			cpus_have_tags;
32
33 struct {
34 spinlock_t lock;
35 /*
36 * When we go to steal tags from another cpu (see steal_tags()),
37 * we want to pick a cpu at random. Cycling through them every
38 * time we steal is a bit easier and more or less equivalent:
39 */
40 unsigned cpu_last_stolen;
41
42 /* For sleeping on allocation failure */
43 wait_queue_head_t wait;
44
45 /*
46 * Global freelist - it's a stack where nr_free points to the
47 * top
48 */
49 unsigned nr_free;
50 unsigned *freelist;
51 } ____cacheline_aligned_in_smp;
52};
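
/*
 * Illustrative sketch, not the implementation: how the fields above might
 * cooperate on allocation. struct percpu_ida_cpu is opaque in this header,
 * so the layout below is an assumption (a small per-cpu stack of free tags),
 * as is the alloc_local_tag() helper:
 *
 *	struct percpu_ida_cpu {
 *		spinlock_t	lock;
 *		unsigned	nr_free;
 *		unsigned	freelist[];
 *	};
 *
 *	static int alloc_local_tag(struct percpu_ida_cpu *tags)
 *	{
 *		int tag = -ENOSPC;
 *
 *		spin_lock(&tags->lock);
 *		if (tags->nr_free)
 *			tag = tags->freelist[--tags->nr_free];
 *		spin_unlock(&tags->lock);
 *
 *		return tag;
 *	}
 *
 * When the local freelist is empty, stealing cycles through the cpus in
 * cpus_have_tags starting from cpu_last_stolen, roughly:
 *
 *	cpu = cpumask_next(pool->cpu_last_stolen, &pool->cpus_have_tags);
 *	if (cpu >= nr_cpu_ids)
 *		cpu = cpumask_first(&pool->cpus_have_tags);
 *	pool->cpu_last_stolen = cpu;
 *
 * A victim whose freelist turns out to be empty just has its bit cleared
 * and the search continues - the bitmap is a hint, not a guarantee.
 */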

int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
void percpu_ida_free(struct percpu_ida *pool, unsigned tag);

void percpu_ida_destroy(struct percpu_ida *pool);
int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);
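
/*
 * Typical usage, as an illustrative sketch (error handling trimmed; the
 * return conventions assumed here - 0 or -ENOMEM from percpu_ida_init(),
 * a tag or a negative error from percpu_ida_alloc() - follow common kernel
 * style but are assumptions, not guarantees made by this header):
 *
 *	struct percpu_ida tags;
 *	int tag;
 *
 *	if (percpu_ida_init(&tags, 128))
 *		return -ENOMEM;
 *
 *	tag = percpu_ida_alloc(&tags, GFP_KERNEL);
 *	if (tag >= 0) {
 *		... use tag, e.g. as an index into a preallocated array ...
 *		percpu_ida_free(&tags, tag);
 *	}
 *
 *	percpu_ida_destroy(&tags);
 *
 * With GFP_KERNEL, percpu_ida_alloc() may sleep until a tag is freed.
 */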

#endif /* __PERCPU_IDA_H__ */