#ifndef _ASM_POWERPC_PERCPU_H_
#define _ASM_POWERPC_PERCPU_H_
#ifdef __powerpc64__
#include <linux/compiler.h>

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the paca. Based on the x86-64 implementation.
 */
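
/*
 * Illustrative expansion (a sketch; "foo" is a hypothetical variable):
 * with the macros below, an access such as
 *
 *	per_cpu(foo, cpu)
 *
 * dereferences the address of the template copy per_cpu__foo plus
 * paca[cpu].data_offset, i.e. that CPU's private copy of the data.
 */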

#ifdef CONFIG_SMP

#include <asm/paca.h>

#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
#define __my_cpu_offset() get_paca()->data_offset
#define per_cpu_offset(x) (__per_cpu_offset(x))

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
	__attribute__((__section__(".data.percpu.shared_aligned"))) \
	__typeof__(type) per_cpu__##name			\
	____cacheline_aligned_in_smp

/* var is in discarded region: offset to particular copy we want */
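/*
 * RELOC_HIDE() (from <linux/compiler.h>) adds the byte offset to the
 * address while hiding the arithmetic from the compiler, so gcc cannot
 * assume the result still points into the original section.
 */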
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, local_paca->data_offset))

/*
 * Copy a module's per-cpu initialisation data into every possible
 * CPU's area; open-coded as a macro to avoid #include hell...
 */
#define percpu_modcopy(pcpudst, src, size)			\
do {								\
	unsigned int __i;					\
	for_each_possible_cpu(__i)				\
		memcpy((pcpudst)+__per_cpu_offset(__i),		\
		       (src), (size));				\
} while (0)

extern void setup_per_cpu_areas(void);

#else /* ! SMP */

#define DEFINE_PER_CPU(type, name) \
	__typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)

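/*
 * Single copy on UP; (void)(cpu) still evaluates the cpu expression,
 * so arguments with side effects behave as they would on SMP.
 */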
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var

#endif /* SMP */

#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
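
/*
 * Typical usage (a sketch; "hypothetical_stat" is illustrative only):
 * the defining .c file does
 *
 *	DEFINE_PER_CPU(long, hypothetical_stat);
 *	EXPORT_PER_CPU_SYMBOL(hypothetical_stat);
 *
 * while other files pick it up via a header containing
 *
 *	DECLARE_PER_CPU(long, hypothetical_stat);
 */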

#else /* !__powerpc64__ */
#include <asm-generic/percpu.h>
#endif /* __powerpc64__ */

#endif /* _ASM_POWERPC_PERCPU_H_ */