/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};
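
/*
 * Illustrative reading of these bits (a sketch, not an exhaustive rule
 * set): a class observed being taken from hardirq context gets
 * LOCK_USED_IN_HARDIRQ; a class observed being taken with hardirqs
 * enabled gets LOCK_ENABLED_HARDIRQS. A class that accumulates both
 * bits is an irq-safe/irq-unsafe inversion candidate, which the
 * validator reports.
 */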

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
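
/*
 * A sketch of typical key usage (the name is made up for illustration):
 *
 *	static struct lock_class_key my_driver_lock_key;
 *
 * Only the key's address matters - the structure carries no data, it
 * just has to occupy unique bytes in the kernel (or module) image,
 * one byte per possible subclass.
 */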

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used by certain classes of graph walking
	 * to ensure that we check each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
};
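
/*
 * A simplified sketch of how a lock type embeds the map (the real lock
 * types do this in their CONFIG_DEBUG_LOCK_ALLOC variants; 'raw' stands
 * in for the actual low-level lock word):
 *
 *	struct my_lock {
 *		unsigned int		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 * The dep_map member name matters: the lockdep_set_class*() helpers
 * below expect it.
 */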

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};
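
/*
 * Conceptually, the 64-bit chain key is grown by folding each newly
 * taken lock's class key into the running hash (the exact mixing
 * function lives in kernel/lockdep.c; 'mix' below is only an
 * illustrative placeholder):
 *
 *	chain_key = mix(chain_key, (unsigned long)class->key);
 */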

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
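
/*
 * Usage sketch (hypothetical filesystem names): to give one user's
 * inode lock its own class, so it is not lumped together with every
 * other user of the same lock type:
 *
 *	static struct lock_class_key myfs_i_lock_key;
 *	...
 *	spin_lock_init(&inode->i_lock);
 *	lockdep_set_class(&inode->i_lock, &myfs_i_lock_key);
 */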

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 * 0: exclusive (write) acquire
 * 1: read-acquire (no recursion allowed)
 * 2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 * 0: disabled
 * 1: simple checks (freeing, held-at-exit-time, etc.)
 * 2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
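
/*
 * A locking primitive forwards these events roughly as follows
 * (simplified sketch of what the debug variants of the spinlock code
 * do; do_raw_lock()/do_raw_unlock() are hypothetical low-level ops):
 *
 *	void my_spin_lock(my_spinlock_t *lock)
 *	{
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		do_raw_lock(lock);
 *	}
 *
 *	void my_spin_unlock(my_spinlock_t *lock)
 *	{
 *		spin_release(&lock->dep_map, 1, _RET_IP_);
 *		do_raw_unlock(lock);
 *	}
 *
 * (spin_acquire()/spin_release() are the wrappers defined at the end
 * of this file.)
 */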

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	((tsk)->lockdep_depth)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
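
/*
 * Example - the classic ordered double-lock of two instances of the
 * same class ('parent'/'child' are illustrative):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *
 * spin_lock_nested() passes the subclass through to lock_acquire(),
 * so the nested acquire is classified separately and does not trigger
 * a false self-deadlock report.
 */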

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */