/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>
#include <linux/atomic.h>

/*
 * Private routines/data
 */

extern unsigned char boot_cpu_id;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;

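/* Type of the handlers that a cross call runs on the remote cpus; they are
 * invoked through the xc* helpers / sparc32_ipi_ops->cross_call() below.
 */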
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
			  unsigned long, unsigned long);

void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);

/*
 * General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

void smp_resched_interrupt(void);
void smp_call_function_single_interrupt(void);
void smp_call_function_interrupt(void);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);

struct sparc32_ipi_ops {
	void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4);
	void (*resched)(int cpu);
	void (*single)(int cpu);
	void (*mask_one)(int cpu);
};
extern const struct sparc32_ipi_ops *sparc32_ipi_ops;
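/*
 * Each platform supplies its own IPI implementation and points
 * sparc32_ipi_ops at it during SMP bringup.  A rough sketch of the
 * pattern (the sun4x_* names below are illustrative only, not the
 * actual sun4m/sun4d/leon code):
 *
 *	static const struct sparc32_ipi_ops sun4x_ipi_ops = {
 *		.cross_call	= sun4x_cross_call,
 *		.resched	= sun4x_ipi_resched,
 *		.single		= sun4x_ipi_single,
 *		.mask_one	= sun4x_ipi_mask_one,
 *	};
 *
 *	void __init sun4x_init_smp(void)
 *	{
 *		sparc32_ipi_ops = &sun4x_ipi_ops;
 *	}
 */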

static inline void xc0(smpfunc_t func)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
}

static inline void xc1(smpfunc_t func, unsigned long arg1)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
}

static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
}

static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
				    arg1, arg2, arg3, 0);
}

static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3, unsigned long arg4)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
				    arg1, arg2, arg3, arg4);
}
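/*
 * Example (illustrative only): broadcasting an MMU context flush to all
 * online cpus.  The handler must have the smpfunc_t signature, so unused
 * arguments are simply ignored; local_flush_ctx() below is a hypothetical
 * name, not a symbol in this tree.
 *
 *	static void local_flush_ctx(unsigned long ctx, unsigned long unused2,
 *				    unsigned long unused3, unsigned long unused4,
 *				    unsigned long unused5)
 *	{
 *		... flush MMU context 'ctx' on the local cpu ...
 *	}
 *
 *	xc1(local_flush_ctx, ctx);
 */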

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

static inline int cpu_logical_map(int cpu)
{
	return cpu;
}

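/* sun4m: the cpu id sits in bits 13:12 of the trap base register. */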
static inline int hard_smp4m_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("rd %%tbr, %0\n\t"
			     "srl %0, 12, %0\n\t"
			     "and %0, 3, %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}

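/* sun4d: the cpu id is read back from the per-cpu Viking scratch register
 * reached via ASI_M_VIKING_TMP1.
 */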
static inline int hard_smp4d_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
	return cpuid;
}

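/* LEON: bits 31:28 of %asr17 hold the index of the current cpu. */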
static inline int hard_smpleon_processor_id(void)
{
	int cpuid;
	__asm__ __volatile__("rd %%asr17, %0\n\t"
			     "srl %0, 28, %0" :
			     "=&r" (cpuid) : );
	return cpuid;
}

extern int hard_smp_processor_id(void);

#define raw_smp_processor_id()		(current_thread_info()->cpu)

#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL		0x0005	/* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU		0xFB
#define MBOX_IDLECPU		0xFC
#define MBOX_IDLECPU2		0xFD
#define MBOX_STOPCPU2		0xFE

#else /* SMP */

#define hard_smp_processor_id()		0
#define smp_setup_cpu_possible_map() do { } while (0)

#endif /* !(SMP) */
#endif /* !(_SPARC_SMP_H) */