blob: fb8d2d24e4bb1d855b4d6017202c6dca2882dfa1 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __LINUX_PERCPU_H
2#define __LINUX_PERCPU_H
3#include <linux/spinlock.h> /* For preempt_disable() */
4#include <linux/slab.h> /* For kmalloc() */
5#include <linux/smp.h>
6#include <linux/string.h> /* For memset() */
7#include <asm/percpu.h>
8
/*
 * Size of the static per-cpu area reserved per CPU: enough to cover all
 * DEFINE_PER_CPUs in the kernel, including modules.  Architectures may
 * pre-define a larger value in <asm/percpu.h> (included above), which is
 * why this default is guarded.
 */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM 32768	/* bytes */
#endif
13
/*
 * get_cpu_var(var) - evaluate to the current CPU's instance of a per-cpu
 * variable, as an lvalue, with preemption disabled so the task cannot
 * migrate while using it.  Every get_cpu_var() must be paired with a
 * put_cpu_var(), which re-enables preemption (its argument is unused and
 * exists only for symmetry).  Must be an lvalue.
 */
#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
17
18#ifdef CONFIG_SMP
19
/*
 * Bookkeeping behind an alloc_percpu() handle on SMP: one object copy per
 * possible CPU.  The layout is shared with the out-of-line allocator, so
 * it must not change independently.
 */
struct percpu_data {
	void *ptrs[NR_CPUS];	/* per-CPU copies, indexed by cpu number */
	void *blkp;		/* NOTE(review): presumably the underlying
				 * allocation released by free_percpu();
				 * confirm against the slab implementation */
};
24
/*
 * Use this to get to a cpu's version of the per-cpu object allocated using
 * alloc_percpu. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 *
 * The handle is the one's-complement of the struct percpu_data address
 * (note the ~ below), so that a stray direct dereference of the handle
 * faults instead of silently reading the wrong memory.  This presumes
 * __alloc_percpu() hands out complemented pointers -- the inversion here
 * must stay in sync with the allocator.
 */
#define per_cpu_ptr(ptr, cpu)                   \
({                                              \
        struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
        (__typeof__(ptr))__p->ptrs[(cpu)];      \
})
35
/*
 * Out-of-line SMP implementations.  __alloc_percpu() returns an opaque
 * handle -- only ever dereference it through per_cpu_ptr() -- and the
 * memory is zeroed (see alloc_percpu() below).  Release with free_percpu().
 */
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(const void *);
38
39#else /* CONFIG_SMP */
40
Paul Mundt66341a92005-11-13 16:07:21 -080041#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
Linus Torvalds1da177e2005-04-16 15:20:36 -070042
43static inline void *__alloc_percpu(size_t size, size_t align)
44{
45 void *ret = kmalloc(size, GFP_KERNEL);
46 if (ret)
47 memset(ret, 0, size);
48 return ret;
49}
/*
 * UP counterpart of free_percpu(): the handle from __alloc_percpu() is a
 * plain kmalloc() buffer, so hand it straight back to the slab allocator.
 * Safe to call with NULL (kfree(NULL) is a no-op).
 */
static inline void free_percpu(const void *ptr)
{
	kfree(ptr);
}
54
55#endif /* CONFIG_SMP */
56
/*
 * alloc_percpu(type) - allocate one zeroed instance of @type per CPU.
 * Simple wrapper for the common case: zeros memory.  The result is an
 * opaque handle: dereference it only via per_cpu_ptr(), release it with
 * free_percpu().  Evaluates to NULL on allocation failure.
 */
#define alloc_percpu(type) \
	((type *)(__alloc_percpu(sizeof(type), __alignof__(type))))
60
61#endif /* __LINUX_PERCPU_H */