#ifndef __PERCPU_IDA_H__
#define __PERCPU_IDA_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/spinlock_types.h>
#include <linux/wait.h>
#include <linux/cpumask.h>

struct percpu_ida_cpu;
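/*
 * Tag pool with per-cpu caching: percpu_ida_alloc() is expected to try the
 * calling cpu's percpu freelist first, then refill from the global freelist
 * below, and only steal from other cpus' freelists (see steal_tags()) as a
 * last resort.
 */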
struct percpu_ida {
	/*
	 * number of tags available to be allocated, as passed to
	 * percpu_ida_init()
	 */
	unsigned			nr_tags;
	unsigned			percpu_max_size;
	unsigned			percpu_batch_size;

	struct percpu_ida_cpu __percpu	*tag_cpu;

	/*
	 * Bitmap of cpus that (may) have tags on their percpu freelists:
	 * steal_tags() uses this to decide when to steal tags, and which cpus
	 * to try stealing from.
	 *
	 * It's ok for a freelist to be empty when its bit is set - steal_tags()
	 * will just keep looking - but the bitmap _must_ be set whenever a
	 * percpu freelist does have tags.
	 */
	cpumask_t			cpus_have_tags;

	struct {
		spinlock_t		lock;
		/*
		 * When we go to steal tags from another cpu (see steal_tags()),
		 * we want to pick a cpu at random. Cycling through them every
		 * time we steal is a bit easier and more or less equivalent:
		 */
		unsigned		cpu_last_stolen;

		/* For sleeping on allocation failure */
		wait_queue_head_t	wait;

		/*
		 * Global freelist - it's a stack where nr_free points to the
		 * top
		 */
		unsigned		nr_free;
		unsigned		*freelist;
	} ____cacheline_aligned_in_smp;
};

/*
 * Number of tags we move between the percpu freelist and the global freelist
 * at a time
 */
#define IDA_DEFAULT_PCPU_BATCH_MOVE	32U
/* Max size of a percpu freelist */
#define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
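/*
 * With the defaults above, a percpu freelist caches at most
 * (32U * 3) / 2 = 48 tags, and refills/spills move 32 tags at a time.
 */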

int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
void percpu_ida_free(struct percpu_ida *pool, unsigned tag);

void percpu_ida_destroy(struct percpu_ida *pool);
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
	unsigned long max_size, unsigned long batch_size);
static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
{
	return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
		IDA_DEFAULT_PCPU_BATCH_MOVE);
}
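
/*
 * Minimal usage sketch (illustrative only; the pool variable and the tag
 * count of 128 below are assumptions, not part of this API). With a
 * sleeping gfp mask such as GFP_KERNEL, percpu_ida_alloc() may block until
 * a tag is freed; a negative return value means no tag was allocated.
 *
 *	struct percpu_ida pool;
 *	int err, tag;
 *
 *	err = percpu_ida_init(&pool, 128);
 *	if (err)
 *		return err;
 *
 *	tag = percpu_ida_alloc(&pool, GFP_KERNEL);
 *	if (tag >= 0)
 *		percpu_ida_free(&pool, tag);
 *
 *	percpu_ida_destroy(&pool);
 */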

typedef int (*percpu_ida_cb)(unsigned, void *);
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
	void *data);
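/*
 * Sketch of a percpu_ida_for_each_free() callback (hypothetical names; the
 * counter threaded through @data is made up for the example). A nonzero
 * return from the callback is expected to stop the iteration and be passed
 * back to the caller.
 *
 *	static int count_free_tag(unsigned tag, void *data)
 *	{
 *		(*(unsigned *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned n = 0;
 *	int err = percpu_ida_for_each_free(&pool, count_free_tag, &n);
 */
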
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
#endif /* __PERCPU_IDA_H__ */