/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

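/*
 * Illustration only (not part of this header): the classic RCU usage
 * pattern that the single-CPU stubs below implement.  Here "gp",
 * "my_lock", and do_something_with() are hypothetical:
 *
 *	rcu_read_lock();			(reader)
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 *	spin_lock(&my_lock);			(updater)
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();			(wait for pre-existing readers)
 *	kfree(old);
 */
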
struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

static inline bool rcu_eqs_special_set(int cpu)
{
	return false;  /* Never flag non-existent other CPUs! */
}

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}
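
/*
 * Sketch of the grace-period cookie pattern served by the two stubs
 * above (illustration only; do_other_work() is hypothetical):
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *	do_other_work();
 *	cond_synchronize_rcu(cookie);
 *
 * cond_synchronize_rcu() blocks only if no full grace period has
 * elapsed since the cookie was captured.  With just one CPU, any
 * needed grace period is implied by the caller being able to sleep,
 * so the Tiny version reduces to might_sleep().
 */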

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}
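
/*
 * Typical rcu_barrier() use (illustration only; my_module_exit() and
 * unregister_my_hooks() are hypothetical): on module unload, first stop
 * posting new call_rcu() callbacks, then wait for those already posted
 * before the module text implementing them can go away:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_my_hooks();
 *		rcu_barrier();
 *	}
 */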

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}
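
/*
 * Minimal sketch of deferred freeing by way of this function
 * (illustration only; struct foo and "p" are hypothetical).  Callers
 * normally go through the kfree_rcu() macro from rcupdate.h, which
 * encodes the rcu_head offset as the callback and lands here:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(p, rcu);
 *
 * In Tiny RCU this simply forwards to call_rcu().
 */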

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_sched_qs(); \
		rcu_note_voluntary_context_switch_lite(current); \
	} while (0)

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
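
/*
 * rcu_is_watching() reports whether RCU is legal in the current context,
 * for example outside the idle loop's extended quiescent state.  A common
 * use is a lockdep-style assertion such as:
 *
 *	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 *			 "rcu_read_lock() used illegally while idle");
 */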

static inline void rcu_request_urgent_qs_task(struct task_struct *t)
{
}

static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
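/* All NULL: with only one CPU, there are no hotplug transitions to track. */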

#endif /* __LINUX_RCUTINY_H */