blob: 07d78e4653bc007ce8bfa7c233456a603fb3223b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Kent Overstreet798ab482013-08-16 22:04:37 +00002#ifndef __PERCPU_IDA_H__
3#define __PERCPU_IDA_H__
4
5#include <linux/types.h>
6#include <linux/bitops.h>
7#include <linux/init.h>
Kent Overstreet6f6b5d12014-01-19 08:26:37 +00008#include <linux/sched.h>
Kent Overstreet798ab482013-08-16 22:04:37 +00009#include <linux/spinlock_types.h>
10#include <linux/wait.h>
11#include <linux/cpumask.h>
12
/* Per-cpu tag freelist; definition is private to the implementation file. */
struct percpu_ida_cpu;

/*
 * percpu_ida - percpu tag (small integer ID) allocator.
 *
 * Tags live either on a per-cpu freelist (fast path, no shared-cacheline
 * traffic) or on the global freelist below; tags migrate between the two
 * in batches of @percpu_batch_size.
 */
struct percpu_ida {
	/*
	 * number of tags available to be allocated, as passed to
	 * percpu_ida_init()
	 */
	unsigned nr_tags;
	/*
	 * Maximum number of tags a single cpu's freelist may hold;
	 * beyond this, tags are returned to the global freelist.
	 */
	unsigned percpu_max_size;
	/* Number of tags moved per cpu<->global transfer (see batch macro). */
	unsigned percpu_batch_size;

	/* Per-cpu freelists, one struct percpu_ida_cpu per possible cpu. */
	struct percpu_ida_cpu __percpu *tag_cpu;

	/*
	 * Bitmap of cpus that (may) have tags on their percpu freelists:
	 * steal_tags() uses this to decide when to steal tags, and which cpus
	 * to try stealing from.
	 *
	 * It's ok for a freelist to be empty when its bit is set - steal_tags()
	 * will just keep looking - but the bitmap _must_ be set whenever a
	 * percpu freelist does have tags.
	 */
	cpumask_t cpus_have_tags;

	/*
	 * Lock-protected shared state, kept on its own cacheline
	 * (____cacheline_aligned_in_smp) away from the read-mostly
	 * fields above to limit false sharing.
	 */
	struct {
		spinlock_t lock;
		/*
		 * When we go to steal tags from another cpu (see steal_tags()),
		 * we want to pick a cpu at random. Cycling through them every
		 * time we steal is a bit easier and more or less equivalent:
		 */
		unsigned cpu_last_stolen;

		/* For sleeping on allocation failure */
		wait_queue_head_t wait;

		/*
		 * Global freelist - it's a stack where nr_free points to the
		 * top
		 */
		unsigned nr_free;
		unsigned *freelist;
	} ____cacheline_aligned_in_smp;
};
57
Shaohua Lie26b53d2013-10-15 09:05:01 +080058/*
59 * Number of tags we move between the percpu freelist and the global freelist at
60 * a time
61 */
62#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
63/* Max size of percpu freelist, */
64#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
65
Kent Overstreet6f6b5d12014-01-19 08:26:37 +000066int percpu_ida_alloc(struct percpu_ida *pool, int state);
Kent Overstreet798ab482013-08-16 22:04:37 +000067void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
68
69void percpu_ida_destroy(struct percpu_ida *pool);
Shaohua Lie26b53d2013-10-15 09:05:01 +080070int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
71 unsigned long max_size, unsigned long batch_size);
72static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
73{
74 return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
75 IDA_DEFAULT_PCPU_BATCH_MOVE);
76}
Kent Overstreet798ab482013-08-16 22:04:37 +000077
Shaohua Li7fc2ba12013-10-15 09:05:02 +080078typedef int (*percpu_ida_cb)(unsigned, void *);
79int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
80 void *data);
81
Shaohua Li1dddc012013-10-15 09:05:03 +080082unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
Kent Overstreet798ab482013-08-16 22:04:37 +000083#endif /* __PERCPU_IDA_H__ */