/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/lmb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

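/* Entry point for a freshly started secondary cpu, called from the
 * startup trampoline: establish this cpu's per-cpu offset, register
 * its kernel TSBs with the hypervisor on sun4v, start its timer, and
 * report in via callin_flag.  It then spins until the master adds it
 * to smp_commenced_mask before marking itself online.
 */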
void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	spin_lock(&call_lock);
	cpu_set(cpuid, cpu_online_map);
	spin_unlock(&call_lock);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

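/* One run of the slave side of the measurement protocol: sample the
 * local tick register (t0), wake the master through go[MASTER], read
 * the master's tick value back from go[SLAVE], then sample locally
 * again (t1).  The iteration with the smallest round trip wins.
 * Assuming the master's reply lands at the midpoint of [t0, t1], the
 * returned delta is (t0 + t1)/2 - tm, the slave's offset from the
 * master.
 */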
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

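/* Start a cpu in a sun4v logical domain.  We build an hvtramp_descr
 * telling the hv_cpu_startup trampoline which cpu it is, where the
 * fault status area lives, which thread register value to load, and
 * the locked TLB entries covering the kernel image (one 4MB mapping
 * per entry), then hand its real address to sun4v_cpu_start().
 */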
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

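/* Boot one cpu: fork its idle thread, launch it through the
 * hypervisor, LDOM services, or OBP as appropriate, then poll
 * callin_flag for up to five seconds (50000 * 100us) waiting for
 * the new cpu to call in from smp_callin().
 */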
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}

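/* Deliver one mondo (data0, data1, data2) to a single cpu on
 * spitfire: program the three UDB interrupt dispatch data registers,
 * trigger the dispatch, then poll the dispatch status register,
 * retrying for as long as the target NACKs us.  PSTATE_IE stays
 * clear across the whole dispatch/poll sequence.
 */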
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask_nr(i, *mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask_p)
{
	u64 pstate, ver, busy_mask;
	int nack_busy_id, is_jbus, need_more;
	cpumask_t mask;

	if (cpus_empty(*mask_p))
		return;

	mask = *mask_p;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for_each_cpu_mask_nr(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (is_jbus) {
				busy_mask |= (0x1UL << (i * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask_nr(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask_nr(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

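/* On sun4v the hypervisor does the delivery for us: we fill in a
 * three-word mondo block and a cpu list in this cpu's trap block and
 * invoke the cpu_mondo_send API.  Partial delivery is reported via
 * HV_EWOULDBLOCK, with completed list entries overwritten with
 * 0xffff, so the send loop below retries while tracking forward
 * progress.
 */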
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	int cnt, retries, this_cpu, prev_sent, i;
	unsigned long flags, status;
	cpumask_t error_mask;
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;

	if (cpus_empty(*mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask_nr(i, *mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask_nr(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

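/* All cross calls funnel through this pointer; it is set once at boot
 * in smp_setup_processor_id() to the spitfire, cheetah, or hypervisor
 * delivery routine above, matching the cpu type.
 */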
static void (*xcall_deliver)(u64, u64, u64, const cpumask_t *);

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	xcall_deliver(data0, data1, data2, &mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      &cpumask_of_cpu(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi(cpumask_t mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      &cpumask_of_cpu(cpu));
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
#ifdef CONFIG_MAGIC_SYSRQ
extern unsigned long xcall_fetch_glob_regs;
#endif
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

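/* D-cache flush helpers.  The local variant flushes by virtual
 * address, and on spitfire also flushes the I-cache when the page is
 * mapped to user space; the smp_* variants cross-call whichever
 * cpu(s) may hold stale lines.  sun4v cpus have no D-cache aliasing
 * problem to deal with, so the hypervisor case bails out immediately.
 */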
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, &mask);
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	cpumask_t mask = cpu_online_map;
	void *pg_addr;
	int this_cpu;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, &mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

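/* Handler for the xcall_new_mmu_context_version cross call: if the
 * address space this cpu is currently running holds a stale context
 * version, allocate a fresh context and reload the secondary context
 * register so user TLB entries get rebuilt against the new context.
 */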
void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

#ifdef CONFIG_MAGIC_SYSRQ
void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}
#endif

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the
 *    TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

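/* Corral every other online cpu into smp_penguin_jailcell() and hold
 * it there until smp_release().  Calls may nest; smp_capture_depth
 * makes only the outermost capture/release pair do the real work.
 */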
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver = cheetah_xcall_deliver;
	else
		xcall_deliver = hypervisor_xcall_deliver;
}

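/* Recompute the topology masks from the per-cpu core_id and proc_id
 * values: cpus sharing a core_id become cpu_core_map siblings, and
 * cpus sharing a proc_id become cpu_sibling_map siblings.  A cpu with
 * core_id 0 (resp. proc_id -1) is grouped only with itself.
 */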
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
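/* Run on a cpu as its final act when going offline: unconfigure its
 * sun4v mondo queues if necessary, drop out of smp_commenced_mask so
 * __cpu_die() can finish, then spin forever with interrupts disabled.
 */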
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	spin_lock(&call_lock);
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      &cpumask_of_cpu(cpu));
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

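/* Allocate and populate the per-cpu data areas.  The size of one
 * area is rounded up to a power-of-two number of pages so that a
 * cpu's area can be located with a simple shift by __per_cpu_shift,
 * and the .data.percpu template is copied once for every possible
 * cpu.
 */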
void __init real_setup_per_cpu_areas(void)
{
	unsigned long paddr, goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
	if (!paddr) {
		prom_printf("Cannot allocate per-cpu memory.\n");
		prom_halt();
	}

	ptr = __va(paddr);
	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}