#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
extern int disable_apic;
#endif

#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif
#include <asm/apic.h>
#include <asm/thread_info.h>
#endif
#endif

#ifdef CONFIG_SMP
#ifndef ASSEMBLY

#include <asm/pda.h>

struct pt_regs;

/*
 * Private routines/data
 */

extern void smp_alloc_memory(void);
extern cpumask_t cpu_online_map;
extern volatile unsigned long smp_invalidate_needed;
extern int pic_mode;
extern int smp_num_siblings;
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
extern void smp_invalidate_rcv(void);		/* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings(void);
void smp_stop_cpu(void);
extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 phys_proc_id[NR_CPUS];
extern u8 cpu_core_id[NR_CPUS];

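/*
 * Physical address in low memory where the 16-bit trampoline code for
 * secondary CPUs is installed before they are sent the startup IPI.
 */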
#define SMP_TRAMPOLINE_BASE 0x6000

/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 */

extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
#define cpu_possible_map cpu_callout_map

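/* Number of CPUs the boot processor has called out so far. */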
static inline int num_booting_cpus(void)
{
        return cpus_weight(cpu_callout_map);
}

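/* Current CPU number, read from the per-CPU PDA via a %gs-relative load. */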
#define __smp_processor_id() read_pda(cpunumber)

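/* Physical APIC ID, read directly from the fixmapped local APIC. */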
extern __inline int hard_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}

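/*
 * Fall back to CPU 0 if the local APIC is disabled, otherwise derive the
 * CPU number from the hardware APIC ID rather than the PDA.
 */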
#define safe_smp_processor_id() (disable_apic ? 0 : x86_apicid_to_cpu(hard_smp_processor_id()))

#endif /* !ASSEMBLY */

#define NO_PROC_ID		0xFF		/* No processor magic marker */

#endif

#ifndef ASSEMBLY
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
extern u8 x86_cpu_to_log_apicid[NR_CPUS];
extern u8 bios_cpu_apicid[];

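/*
 * Only meaningful for flat logical delivery mode, where each CPU's bit in
 * the cpumask matches its bit in the APIC logical destination register, so
 * the first word of the mask can be used as an IPI destination directly.
 */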
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
{
        return cpus_addr(cpumask)[0];
}

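/* Translate a physical APIC ID back to a Linux CPU number. */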
static inline int x86_apicid_to_cpu(u8 apicid)
{
        int i;

        for (i = 0; i < NR_CPUS; ++i)
                if (x86_cpu_to_apicid[i] == apicid)
                        return i;

        /* No entries in x86_cpu_to_apicid?  Either no MPS|ACPI,
         * or called too early.  Either way, we must be CPU 0. */
        if (x86_cpu_to_apicid[0] == BAD_APICID)
                return 0;

        return -1;
}

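/* APIC ID reported by the BIOS (MP table/ACPI) for a given CPU slot. */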
static inline int cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < NR_CPUS)
                return (int)bios_cpu_apicid[mps_cpu];
        else
                return BAD_APICID;
}

#endif /* !ASSEMBLY */

#ifndef CONFIG_SMP
#define stack_smp_processor_id() 0
#define safe_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
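/*
 * Kernel stacks are THREAD_SIZE aligned with thread_info at the bottom,
 * so masking %rsp with CURRENT_MASK yields the thread_info pointer; its
 * ->cpu field gives the CPU number without touching the PDA.
 */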
#define stack_smp_processor_id() \
({								\
        struct thread_info *ti;					\
        __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
        ti->cpu;						\
})
#endif

#ifndef __ASSEMBLY__
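/* Logical APIC ID, read from the local APIC's logical destination register. */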
static __inline int logical_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
#endif

#endif