/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>

extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}

void __init smp_store_cpu_info(int id)
{
	int cpu_node, def;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).idle_volume = 1;

	def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
						      def);

	def = 32;
	cpu_data(id).dcache_line_size =
		prom_getintdefault(cpu_node, "dcache-line-size", def);

	def = 16 * 1024;
	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
						      def);

	def = 32;
	cpu_data(id).icache_line_size =
		prom_getintdefault(cpu_node, "icache-line-size", def);

	def = ((tlb_type == hypervisor) ?
	       (3 * 1024 * 1024) :
	       (4 * 1024 * 1024));
	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
						      def);

	def = 64;
	cpu_data(id).ecache_line_size =
		prom_getintdefault(cpu_node, "ecache-line-size", def);

	printk("CPU[%d]: Caches "
	       "D[sz(%d):line_sz(%d)] "
	       "I[sz(%d):line_sz(%d)] "
	       "E[sz(%d):line_sz(%d)]\n",
	       id,
	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}

static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	smp_setup_percpu_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
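
/* Note on the layout: MASTER is index 0 and SLAVE sits a full
 * SMP_CACHE_BYTES worth of longs away, so the two polling flags in
 * go[] never share a cache line and master and slave can each spin
 * on their own flag without bouncing a line back and forth.  (A
 * sketch of the intent, inferred from the defines above, not from
 * any documented requirement.)
 */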

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
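
/* Worked example of the offset estimate, with made-up numbers: if the
 * tightest round trip sampled t0 = 100, tm = 90, t1 = 120 (t0/t1 in
 * slave ticks, tm in master ticks), then tcenter = 110 and get_delta()
 * returns 110 - 90 = 20, i.e. the slave's tick reads about 20 cycles
 * ahead of the master's, assuming the mondo latency is symmetric in
 * both directions.  The caller below applies adj = -delta to pull the
 * slave back into line.
 */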

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	if (tlb_type == hypervisor) {
		/* Alloc the mondo queues, cpu will load them.  */
		sun4v_init_mondo_queues(0, cpu, 1, 0);

		prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		int cpu_node;

		cpu_find_by_mid(cpu, &cpu_node);
		prom_startcpu(cpu_node, entry, cookie);
	}

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
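
/* Gloss on the polling loop above, assuming (from the loop condition,
 * not from the Spitfire manual in hand) that bit 0 of
 * ASI_INTR_DISPATCH_STAT is the dispatch-busy bit:
 *
 *   result == 0        mondo accepted, restore PSTATE_IE and return
 *   result & 0x1       still busy, keep spinning, up to 100000 reads
 *   busy clear but
 *   result != 0        the target NACK'ed us, back off 2us and retry
 */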

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However we do take
 * advantage of the new pipelining feature (ie. dispatch to multiple
 * cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}
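
/* Gloss on the status bits, inferred from the masks used above rather
 * than from the manual: the dispatch status register packs two bits
 * per outstanding mondo, busy in the even positions (hence the
 * 0x5555555555555555UL poll mask) and NACK in the odd positions (hence
 * check_mask = 0x2UL << ...).  All-zero means every target accepted;
 * busy bits stuck high are a hard error; NACK bits select which cpus
 * stay in 'mask' for the retry pass.
 */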

#if 0
/* Multi-cpu list version.  */
static int init_cpu_list(u16 *list, cpumask_t mask)
{
	int i, cnt;

	cnt = 0;
	for_each_cpu_mask(i, mask)
		list[cnt++] = i;

	return cnt;
}

static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
{
	int i;

	for (i = 0; i < orig_cnt; i++) {
		if (list[i] == 0xffff)
			cpu_clear(i, mask);
	}

	return init_cpu_list(list, mask);
}

static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	int this_cpu = get_cpu();
	struct trap_per_cpu *tb = &trap_block[this_cpu];
	u64 *mondo = __va(tb->cpu_mondo_block_pa);
	u16 *cpu_list = __va(tb->cpu_list_pa);
	int cnt, retries;

	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	retries = 0;
	cnt = init_cpu_list(cpu_list, mask);
	do {
		register unsigned long func __asm__("%o5");
		register unsigned long arg0 __asm__("%o0");
		register unsigned long arg1 __asm__("%o1");
		register unsigned long arg2 __asm__("%o2");

		func = HV_FAST_CPU_MONDO_SEND;
		arg0 = cnt;
		arg1 = tb->cpu_list_pa;
		arg2 = tb->cpu_mondo_block_pa;

		__asm__ __volatile__("ta	%8"
				     : "=&r" (func), "=&r" (arg0),
				       "=&r" (arg1), "=&r" (arg2)
				     : "0" (func), "1" (arg0),
				       "2" (arg1), "3" (arg2),
				       "i" (HV_FAST_TRAP)
				     : "memory");
		if (likely(arg0 == HV_EOK))
			break;

		if (unlikely(++retries > 100)) {
			printk("CPU[%d]: sun4v mondo error %lu\n",
			       this_cpu, func);
			break;
		}

		cnt = update_cpu_list(cpu_list, cnt, mask);

		udelay(2 * cnt);
	} while (1);

	put_cpu();
}
#else
/* Single-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	int this_cpu = get_cpu();
	struct trap_per_cpu *tb = &trap_block[this_cpu];
	u64 *mondo = __va(tb->cpu_mondo_block_pa);
	u16 *cpu_list = __va(tb->cpu_list_pa);
	int i;

	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	for_each_cpu_mask(i, mask) {
		int retries = 0;

		do {
			register unsigned long func __asm__("%o5");
			register unsigned long arg0 __asm__("%o0");
			register unsigned long arg1 __asm__("%o1");
			register unsigned long arg2 __asm__("%o2");

			cpu_list[0] = i;
			func = HV_FAST_CPU_MONDO_SEND;
			arg0 = 1;
			arg1 = tb->cpu_list_pa;
			arg2 = tb->cpu_mondo_block_pa;

			__asm__ __volatile__("ta	%8"
					     : "=&r" (func), "=&r" (arg0),
					       "=&r" (arg1), "=&r" (arg2)
					     : "0" (func), "1" (arg0),
					       "2" (arg1), "3" (arg2),
					       "i" (HV_FAST_TRAP)
					     : "memory");
			if (likely(arg0 == HV_EOK))
				break;

			if (unlikely(++retries > 100)) {
				printk("CPU[%d]: sun4v mondo error %lu\n",
				       this_cpu, func);
				break;
			}

			udelay(2 * i);
		} while (1);
	}

	put_cpu();
}
#endif
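
/* Note on the hypercall convention used above, assuming the standard
 * sun4v fast-trap ABI (function number in %o5, arguments in %o0-%o2,
 * "ta HV_FAST_TRAP", status returned in %o0): cpu_mondo_send takes a
 * cpu count, the physical address of a cpu list, and the physical
 * address of the mondo data block; HV_EOK means everything was
 * delivered, and any other status is treated here as "back off and
 * retry, up to 100 times".
 */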

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus = cpus_weight(mask) - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
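
/* Note that the timeout path above still returns 0 after roughly one
 * second (1000000 rounds of udelay(1)), so callers cannot distinguish
 * "every cpu ran func" from "somebody never answered"; the only
 * evidence of the latter is the XCALL printk.
 */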

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct mm_struct *mm = info;

	if (current->active_mm == mm)
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}
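
/* smp_tsb_sync() doubles as a compact usage example of
 * smp_call_function_mask(): it runs tsb_sync() with wait=1 on every
 * cpu in mm->cpu_vm_mask, and each target reloads its TSB base
 * registers via tsb_context_switch() only if it is running that mm
 * at the time of the cross call.
 */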

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cheetah_xcall_deliver(data0, 0, 0, mask);
		else if (tlb_type == hypervisor)
			hypervisor_xcall_deliver(data0, 0, 0, mask);
	}
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and for clones, we use
 *    cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
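
/* Summary of the capture handshake: smp_capture() raises
 * penguins_are_doing_time, cross calls xcall_capture, and spins until
 * smp_capture_registry counts every online cpu (itself included).
 * Each captured cpu checks in from smp_penguin_jailcell() and spins
 * inside the PROM until smp_release() drops the flag.  The depth
 * counter makes nested capture/release pairs safe.
 */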

#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter

void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}

static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
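
/* Worked example of the multiplier math, with made-up numbers: if
 * timer_tick_offset is 1000000 cycles per jiffy, writing 4 via
 * /proc/profile sets current_tick_offset to 250000, so the percpu
 * timer interrupt fires four times per jiffy.  profile_tick() then
 * samples on every firing, while prof_counter counts four of them
 * down before update_process_times() runs, leaving the scheduler
 * tick rate unchanged.
 */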

/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (num_possible_cpus() > max_cpus) {
		int instance, mid;

		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	int cpu = hard_smp_processor_id();

	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = cpu;
	__local_per_cpu_offset = __per_cpu_offset(cpu);

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}

int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
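
/* Note on the shift-based layout above, assuming the usual sparc64
 * definition __per_cpu_offset(cpu) == __per_cpu_base +
 * ((unsigned long)(cpu) << __per_cpu_shift): rounding each cpu's copy
 * up to a power of two lets a per-cpu base be computed with a shift
 * and an add instead of a multiply.  Made-up example: a 6KB
 * .data.percpu section rounds up to size = 8192, __per_cpu_shift = 13,
 * and cpu 3's copy lives at __per_cpu_base + (3 << 13).
 */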