/*
 * Percpu IDA library
 *
 * Copyright (C) 2013 Datera, Inc. Kent Overstreet
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>

/*
 * Number of tags we move between the percpu freelist and the global freelist at
 * a time
 */
#define IDA_PCPU_BATCH_MOVE	32U

/* Max size of the percpu freelist */
#define IDA_PCPU_SIZE		((IDA_PCPU_BATCH_MOVE * 3) / 2)
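/*
 * Illustrative sizing note: with these values a percpu freelist holds at
 * most 48 tags; when it fills, percpu_ida_free() returns a batch of 32 to
 * the global freelist, leaving 16 cached locally.
 */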

struct percpu_ida_cpu {
	/*
	 * Even though this is percpu, we need a lock for tag stealing by remote
	 * CPUs:
	 */
	spinlock_t lock;

	/* nr_free/freelist form a stack of free IDs */
	unsigned nr_free;
	unsigned freelist[];
};

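/*
 * Pop @nr tags off the top of @src's LIFO freelist and push them onto
 * @dst's. Takes no locks itself; callers are responsible for serializing
 * access to both lists.
 */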
static inline void move_tags(unsigned *dst, unsigned *dst_nr,
			     unsigned *src, unsigned *src_nr,
			     unsigned nr)
{
	*src_nr -= nr;
	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
	*dst_nr += nr;
}

/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags - we don't steal tags
 * unless enough percpu freelists have tags on them that it's possible more than
 * half the total tags could be stuck on remote percpu freelists.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
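/*
 * Worked example (illustrative): with nr_tags = 128 and IDA_PCPU_SIZE = 48,
 * stealing proceeds only while cpus_have_tags * 48 > 64, i.e. while at
 * least two remote percpu freelists are known to be non-empty.
 */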
static inline void steal_tags(struct percpu_ida *pool,
			      struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
	     cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				BUG();
		}

		pool->cpu_last_stolen = cpu;
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		if (remote == tags)
			continue;

		spin_lock(&remote->lock);

		if (remote->nr_free) {
			memcpy(tags->freelist,
			       remote->freelist,
			       sizeof(unsigned) * remote->nr_free);

			tags->nr_free = remote->nr_free;
			remote->nr_free = 0;
		}

		spin_unlock(&remote->lock);

		if (tags->nr_free)
			break;
	}
}

/*
 * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
 * our percpu freelist:
 */
static inline void alloc_global_tags(struct percpu_ida *pool,
				     struct percpu_ida_cpu *tags)
{
	move_tags(tags->freelist, &tags->nr_free,
		  pool->freelist, &pool->nr_free,
		  min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
}

static inline int alloc_local_tag(struct percpu_ida_cpu *tags)
{
	int tag = -ENOSPC;

	spin_lock(&tags->lock);
	if (tags->nr_free)
		tag = tags->freelist[--tags->nr_free];
	spin_unlock(&tags->lock);

	return tag;
}

/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @gfp: gfp flags
 *
 * Returns a tag - an integer in the range [0..nr_tags) (as passed to
 * percpu_ida_init()), or -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * __GFP_WAIT, of course).
 *
 * @gfp indicates whether or not to wait until a free id is available (it's not
 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
 * however long it takes until another thread frees an id (same semantics as a
 * mempool).
 *
 * Will not fail if passed __GFP_WAIT.
 */
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag;

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	/* Fastpath */
	tag = alloc_local_tag(tags);
	if (likely(tag >= 0)) {
		local_irq_restore(flags);
		return tag;
	}

	while (1) {
		spin_lock(&pool->lock);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock(&pool->lock);
		local_irq_restore(flags);

		if (tag >= 0 || !(gfp & __GFP_WAIT))
			break;

		schedule();

		local_irq_save(flags);
		tags = this_cpu_ptr(pool->tag_cpu);
	}

	finish_wait(&pool->wait, &wait);
	return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);
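
/*
 * Illustrative caller pattern (a sketch, not part of this file): process
 * context may block until a tag frees up, while atomic context must
 * handle -ENOSPC:
 *
 *	tag = percpu_ida_alloc(pool, GFP_KERNEL);	(never fails, may sleep)
 *	tag = percpu_ida_alloc(pool, GFP_NOWAIT);	(may return -ENOSPC)
 */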

/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	spin_lock(&tags->lock);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;
	spin_unlock(&tags->lock);

	if (nr_free == 1) {
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}

	if (nr_free == IDA_PCPU_SIZE) {
		spin_lock(&pool->lock);

		/*
		 * Global lock held and irqs disabled, don't need percpu
		 * lock
		 */
		if (tags->nr_free == IDA_PCPU_SIZE) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  IDA_PCPU_BATCH_MOVE);

			wake_up(&pool->wait);
		}
		spin_unlock(&pool->lock);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);

/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
	free_percpu(pool->tag_cpu);
	free_pages((unsigned long) pool->freelist,
		   get_order(pool->nr_tags * sizeof(unsigned)));
}
EXPORT_SYMBOL_GPL(percpu_ida_destroy);

/**
 * percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
{
	unsigned i, cpu, order;

	memset(pool, 0, sizeof(*pool));

	init_waitqueue_head(&pool->wait);
	spin_lock_init(&pool->lock);
	pool->nr_tags = nr_tags;

	/* Guard against overflow */
	if (nr_tags > (unsigned) INT_MAX + 1) {
		pr_err("percpu_ida_init(): nr_tags too large\n");
		return -EINVAL;
	}

	order = get_order(nr_tags * sizeof(unsigned));
	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!pool->freelist)
		return -ENOMEM;

	for (i = 0; i < nr_tags; i++)
		pool->freelist[i] = i;

	pool->nr_free = nr_tags;

	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
				       IDA_PCPU_SIZE * sizeof(unsigned),
				       sizeof(unsigned));
	if (!pool->tag_cpu)
		goto err;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

	return 0;
err:
	percpu_ida_destroy(pool);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(percpu_ida_init);
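
/*
 * Example lifecycle (a sketch under assumed driver code; "requests" and
 * NR_REQS are hypothetical):
 *
 *	static struct request requests[NR_REQS];
 *	static struct percpu_ida pool;
 *
 *	if (percpu_ida_init(&pool, NR_REQS))
 *		return -ENOMEM;
 *
 *	tag = percpu_ida_alloc(&pool, GFP_KERNEL);
 *	... use requests[tag] ...
 *	percpu_ida_free(&pool, tag);
 *
 *	percpu_ida_destroy(&pool);
 */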