#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H

/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
# define THIS_CPU(var)	(per_cpu__##var)  /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */

#include <linux/threads.h>

#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA	__attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA
#endif

#define DECLARE_PER_CPU(type, name)				\
	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name)				\
	__attribute__((__section__(".data.percpu")))		\
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

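/*
 * Illustrative usage (a sketch; "my_counter" and "my_vec" are hypothetical
 * names, not defined by this header).  Because the type is passed
 * separately from the name, array types work too:
 *
 *	DEFINE_PER_CPU(int, my_counter);	-- emits "int per_cpu__my_counter"
 *	DECLARE_PER_CPU(int, my_counter);	-- matching extern for headers
 *	DEFINE_PER_CPU(int[3], my_vec);		-- emits "int per_cpu__my_vec[3]"
 *
 * Each definition is placed in the .data.percpu section and, when the
 * compiler supports it, tagged with the small-address-area model attribute.
 */
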
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name		\
	____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
	DEFINE_PER_CPU(type, name)
#endif

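/*
 * Illustrative usage (a sketch; "my_lock" is a hypothetical name).  On SMP
 * the variable is aligned to a cacheline boundary and grouped into the
 * .data.percpu.shared_aligned section, so writes from one CPU do not
 * false-share with neighbouring per-CPU data; UP builds simply fall back
 * to a plain DEFINE_PER_CPU:
 *
 *	DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, my_lock);
 */
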
/*
 * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
 * external routine, to avoid include-hell.
 */
#ifdef CONFIG_SMP

extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

#define per_cpu(var, cpu)	(*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var)	(*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
#define __raw_get_cpu_var(var)	(*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))

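/*
 * Illustrative usage of the accessors (a sketch; "my_counter" is a
 * hypothetical variable defined with DEFINE_PER_CPU).  per_cpu() reaches a
 * named CPU's copy through its entry in __per_cpu_offset[]; __get_cpu_var()
 * and __raw_get_cpu_var() reach the local copy through local_per_cpu_offset
 * and expand identically on ia64:
 *
 *	int sum = 0, cpu;
 *	for_each_possible_cpu(cpu)
 *		sum += per_cpu(my_counter, cpu);	-- remote read
 *
 *	__get_cpu_var(my_counter)++;	-- local update; the caller is
 *					   expected to keep preemption disabled
 */
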
extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);

#else /* ! SMP */

#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var)		per_cpu__##var
#define __raw_get_cpu_var(var)		per_cpu__##var
#define per_cpu_init()			(__phys_per_cpu_start)

#endif	/* SMP */

#define EXPORT_PER_CPU_SYMBOL(var)	EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)	EXPORT_SYMBOL_GPL(per_cpu__##var)
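
/*
 * Sketch of a module exporting a per-CPU variable (hypothetical name);
 * the per-CPU variants wrap the usual export macros around the mangled
 * per_cpu__ symbol:
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *	EXPORT_PER_CPU_SYMBOL(my_counter);	-- exports per_cpu__my_counter
 */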

/*
 * Be extremely careful when taking the address of this variable!  Due to virtual
 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
#define __ia64_per_cpu_var(var)	(per_cpu__##var)

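/*
 * Sketch of the caveat above (again with a hypothetical "my_counter"):
 * both expressions access the local CPU's copy, but through different
 * mappings, so the two addresses must never be compared or mixed:
 *
 *	void *remapped  = &__ia64_per_cpu_var(my_counter);	-- fixed, virtually
 *								   remapped address
 *	void *canonical = &__get_cpu_var(my_counter);		-- canonical address
 *
 * remapped != canonical, even though loads and stores through either hit
 * the same per-CPU storage.
 */
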
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PERCPU_H */