/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
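
/*
 * A struct rcu_head is normally embedded in the RCU-protected structure
 * itself, so that the callback can recover the enclosing object with
 * container_of().  A minimal sketch (the struct foo type and the
 * foo_reclaim() helper are illustrative, not part of this API):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 * The callback is then registered with call_rcu(&fp->rcu, foo_reclaim),
 * declared at the bottom of this file.
 */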

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.              */
	long	completed;	/* Number of the last completed batch */
	int	next_pending;	/* Is the next batch already waiting? */

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask; /* CPUs that need to switch in order  */
				 /* for current batch to proceed.      */
} ____cacheline_internodealigned_in_smp;

/* Is batch a before batch b? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}
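
/*
 * Signed subtraction keeps these comparisons correct even after the
 * batch counters wrap.  For example, with a == LONG_MAX and
 * b == LONG_MIN (b being the batch issued right after a), a - b wraps
 * to -1 in two's complement, so rcu_batch_before(a, b) still returns 1.
 */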

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	/* Batch # for grace period */
	int		passed_quiesc;	/* User-mode/idle loop etc. */
	int		qs_pending;	/* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		/* Batch # for current RCU batch */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail;
	long		qlen;		/* # of queued callbacks */
	struct rcu_head *curlist;
	struct rcu_head **curtail;
	struct rcu_head *donelist;
	struct rcu_head **donetail;
	long		blimit;		/* Upper limit on a processed batch */
	int		cpu;
	struct rcu_head barrier;
#ifdef CONFIG_SMP
	long		last_rs_qlen;	/* qlen during the last resched */
#endif
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
extern struct rcu_ctrlblk rcu_ctrlblk;
extern struct rcu_ctrlblk rcu_bh_ctrlblk;

/*
 * Increment the quiescent state counter.
 * The counter is a bit degenerate: we do not need to know
 * how many quiescent states passed, just whether there was at least
 * one since the start of the grace period. Thus it is just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}
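
/*
 * Most code never calls these directly; for example, the timer-tick
 * path invokes rcu_check_callbacks() (declared below), which reports a
 * quiescent state through these helpers when the tick interrupted user
 * mode or the idle loop.
 */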

extern int rcu_pending(int cpu);

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock()		preempt_disable()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock()	preempt_enable()

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh()	local_bh_disable()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh()	local_bh_enable()
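
/*
 * A sketch of a softirq-safe reader (the dev_list and handle() names
 * are hypothetical; the pattern is what matters):
 *
 *	rcu_read_lock_bh();
 *	list_for_each_entry_rcu(e, &dev_list, list)
 *		handle(e);
 *	rcu_read_unlock_bh();
 *
 * The matching updater would free removed entries via call_rcu_bh().
 */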

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = p; \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
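
/*
 * Typical read side, combining rcu_read_lock() and rcu_dereference()
 * (a minimal sketch; gp is a hypothetical global RCU-protected pointer
 * to the struct foo from the example further up):
 *
 *	rcu_read_lock();
 *	fp = rcu_dereference(gp);
 *	if (fp != NULL)
 *		do_something_with(fp->data);
 *	rcu_read_unlock();
 *
 * The fetched pointer must not be used after rcu_read_unlock(), since
 * the structure it references may be freed at any time thereafter.
 */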

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	({ \
						smp_wmb(); \
						(p) = (v); \
					})
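
/*
 * Typical update side, pairing rcu_assign_pointer() with the reader
 * sketch above (again, gp, gp_lock and foo_reclaim() are hypothetical):
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	new_fp->data = 42;
 *	spin_lock(&gp_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&gp_lock);
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 *
 * The smp_wmb() in rcu_assign_pointer() ensures that a reader who sees
 * the new pointer also sees the fully initialized structure.
 */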

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (deprecated)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() synchronize_rcu()
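
/*
 * Updaters that may block can use synchronize_rcu() (declared below)
 * instead of call_rcu().  A sketch of the classic unlink-then-free
 * pattern, with the hypothetical names from the earlier examples:
 *
 *	spin_lock(&gp_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 *
 * synchronize_rcu() blocks until every reader that might still see
 * old_fp has left its critical section, after which the kfree() is safe.
 */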

extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);
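
/*
 * A module whose callbacks were queued with call_rcu() should invoke
 * rcu_barrier() on unload to wait for all outstanding callbacks to run;
 * a callback firing after the module text is gone would crash the
 * kernel.  A sketch (my_exit() and remove_all_entries() are
 * hypothetical; each removal is assumed to do a call_rcu()):
 *
 *	static void __exit my_exit(void)
 *	{
 *		remove_all_entries();
 *		rcu_barrier();
 *	}
 */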

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */