/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

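/*
 * Illustrative usage sketch (not part of this header; do_other_work()
 * is a hypothetical placeholder).  A caller snapshots the grace-period
 * state, does other work, and then blocks only if a full grace period
 * has not already elapsed since the snapshot:
 *
 *	unsigned long s;
 *
 *	s = get_state_synchronize_rcu();
 *	do_other_work();
 *	cond_synchronize_rcu(s);
 *
 * With only one CPU, Tiny RCU can treat every grace period as already
 * complete, so the stubs above return a dummy cookie and merely verify
 * that it is legal to sleep.
 */
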
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

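/*
 * Illustrative use (a sketch): kfree_call_rcu() is normally reached
 * through the kfree_rcu() macro, which encodes the offset of the
 * rcu_head field within the enclosing structure as the "func" value.
 * "struct foo" and old_foo_pointer below are made-up names:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rh;
 *	};
 *	struct foo *p = old_foo_pointer;
 *
 *	kfree_rcu(p, rh);
 *
 * The structure is then kfree()d only after a grace period elapses.
 */
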
static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

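/*
 * Illustrative guard (a sketch; do_something_protected() is a
 * placeholder): code that might run where RCU is not watching, such as
 * the idle loop, can check before entering a read-side critical
 * section:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		do_something_protected();
 *		rcu_read_unlock();
 *	}
 *
 * In the !CONFIG_DEBUG_LOCK_ALLOC && !CONFIG_RCU_TRACE case above,
 * Tiny RCU simply reports that RCU is always watching.
 */
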
static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL

#endif /* __LINUX_RCUTINY_H */