blob: e657c45d91d27c5a62f7d542ab9d36f3adcb9109 [file] [log] [blame]
/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */
12
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/kernel_stat.h>
16#include <linux/module.h>
17#include <linux/sched.h>
18#include <linux/mm.h>
Alexey Dobriyan4e950f62007-07-30 02:36:13 +040019#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/threads.h>
21#include <linux/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/interrupt.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/spinlock.h>
26#include <linux/irq.h>
27#include <linux/cache.h>
28#include <linux/profile.h>
29#include <linux/bitops.h>
Alexey Dobriyan574f34c2008-10-15 22:01:19 -070030#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#include <asm/hwrpb.h>
33#include <asm/ptrace.h>
34#include <asm/atomic.h>
35
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/pgtable.h>
39#include <asm/pgalloc.h>
40#include <asm/mmu_context.h>
41#include <asm/tlbflush.h>
42
43#include "proto.h"
44#include "irq_impl.h"
45
46
/* Set DEBUG_SMP to 1 for verbose SMP bring-up tracing.  When disabled,
   DBGS() expands to nothing and costs nothing at run time.  Call sites
   use double parentheses: DBGS(("fmt", args)).  */
#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
53
/* A collection of per-processor data.  Indexed by cpuid; exported for
   modules that need per-CPU loops_per_jiffy etc.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  Each CPU gets its own
   cache line so senders on other CPUs don't false-share the bitmask
   that the receiver reads with xchg() in handle_ipi().  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
62
/* Bit numbers used in ipi_data[].bits; see send_ipi_message() and
   handle_ipi().  */
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  Also used as a
   handshake flag: -1 = secondary must wait, 0 = secondary may run
   calibrate_delay, 1 = secondary finished (see smp_boot_one_cpu).  */
static int smp_secondary_alive __devinitdata = 0;

/* Which cpus ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
Linus Torvalds1da177e2005-04-16 15:20:36 -070082/*
83 * Called by both boot and secondaries to move global data into
84 * per-processor storage.
85 */
86static inline void __init
87smp_store_cpu_info(int cpuid)
88{
89 cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
90 cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
91 cpu_data[cpuid].need_new_asn = 0;
92 cpu_data[cpuid].asn_lock = 0;
93}
94
95/*
96 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
97 */
98static inline void __init
99smp_setup_percpu_timer(int cpuid)
100{
101 cpu_data[cpuid].prof_counter = 1;
102 cpu_data[cpuid].prof_multiplier = 1;
103}
104
105static void __init
106wait_boot_cpu_to_stop(int cpuid)
107{
108 unsigned long stop = jiffies + 10*HZ;
109
110 while (time_before(jiffies, stop)) {
111 if (!smp_secondary_alive)
112 return;
113 barrier();
114 }
115
116 printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
117 for (;;)
118 barrier();
119}
120
/*
 * Where secondaries begin a life of C.
 *
 * Entered (via __smp_callin) on each secondary after the SRM console
 * starts it.  Marks the CPU online, initializes traps/interrupts and
 * the local timer, adopts init_mm, calibrates the delay loop in
 * lockstep with the boot CPU (see smp_boot_one_cpu), then idles.
 * The statement order below is part of the boot handshake — do not
 * reorder casually.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	/* Atomically claim our slot in the online map; a duplicate
	   means two CPUs share an id, which is fatal.  */
	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait boot CPU to stop with irq enabled before running
	   calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow master to continue only after we written loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
176
177/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
Al Virocc040a82007-07-17 08:49:35 +0100178static int __devinit
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179wait_for_txrdy (unsigned long cpumask)
180{
181 unsigned long timeout;
182
183 if (!(hwrpb->txrdy & cpumask))
184 return 0;
185
186 timeout = jiffies + 10*HZ;
187 while (time_before(jiffies, timeout)) {
188 if (!(hwrpb->txrdy & cpumask))
189 return 0;
190 udelay(10);
191 barrier();
192 }
193
194 return -1;
195}
196
197/*
198 * Send a message to a secondary's console. "START" is one such
199 * interesting message. ;-)
200 */
201static void __init
202send_secondary_console_msg(char *str, int cpuid)
203{
204 struct percpu_struct *cpu;
205 register char *cp1, *cp2;
206 unsigned long cpumask;
207 size_t len;
208
209 cpu = (struct percpu_struct *)
210 ((char*)hwrpb
211 + hwrpb->processor_offset
212 + cpuid * hwrpb->processor_size);
213
214 cpumask = (1UL << cpuid);
215 if (wait_for_txrdy(cpumask))
216 goto timeout;
217
218 cp2 = str;
219 len = strlen(cp2);
220 *(unsigned int *)&cpu->ipc_buffer[0] = len;
221 cp1 = (char *) &cpu->ipc_buffer[1];
222 memcpy(cp1, cp2, len);
223
224 /* atomic test and set */
225 wmb();
226 set_bit(cpuid, &hwrpb->rxrdy);
227
228 if (wait_for_txrdy(cpumask))
229 goto timeout;
230 return;
231
232 timeout:
233 printk("Processor %x not ready\n", cpuid);
234}
235
/*
 * A secondary console wants to send a message.  Receive it.
 *
 * Scans hwrpb->txrdy for CPUs with a pending console message, copies
 * each message out of that CPU's IPC buffer (length in the high 32
 * bits of word 0), sanitizes CR/LF for logging, and finally clears
 * txrdy to acknowledge everything.  Called from handle_ipi().
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		/* Locate CPU i's per-processor HWRPB slot.  */
		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		/* Message length lives in the high half of word 0;
		   anything outside (0, 80) won't fit in buf.  */
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			/* NOTE(review): text is read at ipc_buffer[11]
			   while send_secondary_console_msg() writes at
			   ipc_buffer[1] — presumably the console's TX
			   area is at a different offset within the IPC
			   buffer; confirm against the HWRPB spec.  */
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			/* Flatten CR and CRLF to spaces for clean
			   single-line log output.  */
			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	/* Acknowledge all messages at once.  */
	hwrpb->txrdy = 0;
}
288
/*
 * Convince the console to have a secondary cpu begin execution.
 *
 * Builds a minimal hardware PCB for @cpuid, points the HWRPB restart
 * vector at __smp_callin, flags the CPU's slot per the SRM protocol,
 * and asks the console to "START" it.  Returns 0 once the console
 * ACKs (Bootstrap-In-Progress set within 10s), -1 on failure.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	/* Locate this CPU's per-processor HWRPB slot and its HWPCB.  */
	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();			/* flags must be visible before START */

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  The console
	   re-sets Bootstrap In Progress (bit 0) when it accepts.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
359
/*
 * Bring one cpu online.
 *
 * Forks an idle task for @cpuid, starts the CPU via the console
 * (secondary_cpu_start), then runs the smp_secondary_alive handshake
 * with smp_callin(): -1 tells the secondary to wait, 0 releases it
 * into calibrate_delay, and the secondary replies with 1 when alive.
 * Returns 0 on success, -1 on failure.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
412
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 *
 * Walks the HWRPB per-processor slots, counts usable CPUs into
 * smp_num_probed, marks them in cpu_present_map, and propagates the
 * boot CPU's PAL revision to all secondaries.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			/* 0x1cc selects SRM per-CPU status flags;
			   presumably "available/present/OS-capable"
			   bits — confirm against the HWRPB spec.  A
			   CPU must have all of them set to be used.  */
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}
459
/*
 * Called by smp_init to prepare the secondaries.
 *
 * Does boot-CPU bookkeeping, then either deactivates SMP (UP box or
 * max_cpus == 0) or records how many CPUs will be brought up.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		/* Shrink the present map to just the boot CPU.  */
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}
485
/* Nothing to do: the boot CPU was fully set up in smp_prepare_cpus.
   Kept because the generic SMP core requires this hook to exist.  */
void __devinit
smp_prepare_boot_cpu(void)
{
}
490
Al Viroed5f6562007-07-26 17:34:19 +0100491int __cpuinit
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492__cpu_up(unsigned int cpu)
493{
494 smp_boot_one_cpu(cpu);
495
496 return cpu_online(cpu) ? 0 : -ENOSYS;
497}
498
499void __init
500smp_cpus_done(unsigned int max_cpus)
501{
502 int cpu;
503 unsigned long bogosum = 0;
504
505 for(cpu = 0; cpu < NR_CPUS; cpu++)
506 if (cpu_online(cpu))
507 bogosum += cpu_data[cpu].loops_per_jiffy;
508
509 printk(KERN_INFO "SMP: Total of %d processors activated "
510 "(%lu.%02lu BogoMIPS).\n",
511 num_online_cpus(),
512 (bogosum + 2500) / (500000/HZ),
513 ((bogosum + 2500) / (5000/HZ)) % 100);
514}
515
516
/*
 * Per-CPU timer tick handler.  Runs profiling on every tick and,
 * every prof_multiplier ticks, does the normal timekeeping work
 * (update_process_times) inside an irq_enter/irq_exit pair.
 */
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		/* Rearm for the next period.  */
		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}
544
/* Changing the profiling multiplier at run time is not supported on
   Alpha; the generic profiling code handles the -EINVAL.  */
int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
550
551
/*
 * Post @operation to every CPU in @to_whom and kick each one with a
 * hardware IPI.  The first mb() orders the sender's prior stores
 * before the bit becomes visible; the second ensures all bits are
 * set before any wripir fires.  Receivers run handle_ipi().
 */
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
565
/*
 * IPI entry point.  Atomically drains this CPU's pending-IPI bitmask
 * with xchg (so new requests posted during processing are picked up
 * by the outer loop) and dispatches each set bit.  Also drains any
 * pending secondary-console message, since the console has no
 * interrupt of its own.
 */
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		/* Isolate, clear, and decode the lowest set bit.  */
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			/* halt() is expected not to return, hence the
			   intentional lack of a break here.  */
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	/* Piggyback console message delivery on the IPI.  */
	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
620
/* Ask @cpu to reschedule; the work happens on its interrupt return
   path (see IPI_RESCHEDULE in handle_ipi).  */
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
631
/* Halt every other possible CPU (e.g. for shutdown/panic); the
   sender itself is excluded from the target mask.  */
void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
643
/* Arch hook for the generic smp_call_function machinery: IPI a mask
   of CPUs to run queued function calls.  */
void arch_send_call_function_ipi(cpumask_t mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}
648
/* Arch hook for smp_call_function_single: IPI one CPU to run its
   queued single-function call.  */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}
653
/* IPI worker: flush this CPU's instruction cache.  */
static void
ipi_imb(void *ignored)
{
	imb();
}
659
/* Flush the instruction cache on every online CPU, waiting for all
   of them to finish before returning.  */
void
smp_imb(void)
{
	/* Must wait other processors to flush their icache before continue. */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668
/* IPI worker: invalidate this CPU's entire TLB.  */
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}
674
/* Invalidate the TLB on every online CPU, synchronously.  */
void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
684
/* True while this CPU's ASN state is being manipulated; IPI workers
   must then defer to flush_tlb_other() instead of touching the
   current context.  */
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
686
687static void
688ipi_flush_tlb_mm(void *x)
689{
690 struct mm_struct *mm = (struct mm_struct *) x;
691 if (mm == current->active_mm && !asn_locked())
692 flush_tlb_current(mm);
693 else
694 flush_tlb_other(mm);
695}
696
/*
 * Flush all TLB entries for @mm on every CPU.  Fast path: if the mm
 * is ours and single-user, no other CPU can be running it, so just
 * zap the saved per-CPU contexts instead of broadcasting an IPI.
 */
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				/* Force a new ASN when mm next runs
				   on that CPU.  */
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724
/* Argument bundle for ipi_flush_tlb_page: which mapping to zap.  */
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;	/* VMA containing the page */
	struct mm_struct *mm;		/* address space to flush */
	unsigned long addr;		/* user virtual address */
};
730
731static void
732ipi_flush_tlb_page(void *x)
733{
734 struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
735 struct mm_struct * mm = data->mm;
736
737 if (mm == current->active_mm && !asn_locked())
738 flush_tlb_current_page(mm, data->vma, data->addr);
739 else
740 flush_tlb_other(mm);
741}
742
/*
 * Flush one page's TLB entry on every CPU.  Same single-user fast
 * path as flush_tlb_mm: if nobody else can be running this mm, just
 * invalidate the other CPUs' saved contexts and skip the IPI.
 */
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				/* Force a new ASN when mm next runs
				   on that CPU.  */
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777
/* Range flushes degenerate to a whole-mm flush on Alpha.  */
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785
786static void
787ipi_flush_icache_page(void *x)
788{
789 struct mm_struct *mm = (struct mm_struct *) x;
790 if (mm == current->active_mm && !asn_locked())
791 __load_new_mm_context(mm);
792 else
793 flush_tlb_other(mm);
794}
795
/*
 * Make instruction fetches coherent after user text at @addr was
 * written (e.g. by ptrace).  Only needed for executable mappings.
 * Uses the same single-user fast path as the TLB flushers: if nobody
 * else can run this mm, invalidating saved contexts suffices.
 */
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				/* Force a new ASN when mm next runs
				   on that CPU.  */
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}