blob: 1760bf3d14636266eae1f81aa84f277076c4335d [file] [log] [blame]
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
4
Ingo Molnar6e962812009-01-12 16:04:37 +01005#include <linux/interrupt.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -08006#include <linux/kernel.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -04007#include <linux/export.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -08008#include <linux/smp.h>
9
/*
 * UP stand-in for the cross-CPU call: there is only one CPU, so run the
 * callback right here.  IRQs are disabled around the call to mimic the
 * IPI-handler context the SMP implementation would provide.
 *
 * Returns 0, matching the SMP variant's success path.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long irqflags;

	/* On UP the only valid target is CPU 0; flag confused callers. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
David Daneyfa688202013-09-11 14:23:24 -070024
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +010025int smp_call_function_single_async(int cpu, struct call_single_data *csd)
Christoph Hellwig40c01e82013-11-14 14:32:08 -080026{
27 unsigned long flags;
28
29 local_irq_save(flags);
30 csd->func(csd->info);
31 local_irq_restore(flags);
Jan Kara08eed442014-02-24 16:39:57 +010032 return 0;
Christoph Hellwig40c01e82013-11-14 14:32:08 -080033}
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +010034EXPORT_SYMBOL(smp_call_function_single_async);
Christoph Hellwig40c01e82013-11-14 14:32:08 -080035
David Daneybff2dc42013-09-11 14:23:26 -070036int on_each_cpu(smp_call_func_t func, void *info, int wait)
37{
38 unsigned long flags;
39
40 local_irq_save(flags);
41 func(info);
42 local_irq_restore(flags);
43 return 0;
44}
45EXPORT_SYMBOL(on_each_cpu);
46
David Daneyfa688202013-09-11 14:23:24 -070047/*
48 * Note we still need to test the mask even for UP
49 * because we actually can get an empty mask from
50 * code that on SMP might call us without the local
51 * CPU in the mask.
52 */
53void on_each_cpu_mask(const struct cpumask *mask,
54 smp_call_func_t func, void *info, bool wait)
55{
56 unsigned long flags;
57
58 if (cpumask_test_cpu(0, mask)) {
59 local_irq_save(flags);
60 func(info);
61 local_irq_restore(flags);
62 }
63}
64EXPORT_SYMBOL(on_each_cpu_mask);
65
66/*
67 * Preemption is disabled here to make sure the cond_func is called under the
68 * same condtions in UP and SMP.
69 */
70void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
71 smp_call_func_t func, void *info, bool wait,
72 gfp_t gfp_flags)
73{
74 unsigned long flags;
75
76 preempt_disable();
77 if (cond_func(0, info)) {
78 local_irq_save(flags);
79 func(info);
80 local_irq_restore(flags);
81 }
82 preempt_enable();
83}
84EXPORT_SYMBOL(on_each_cpu_cond);