/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * See Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
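
/*
 * Usage sketch (an illustration, not part of this header): the validator
 * sets a usage bit the first time it observes a lock-class in the
 * corresponding state, and the combined LOCKF_* masks let several
 * related bits be tested at once:
 *
 *	if (class->usage_mask & LOCKF_USED_IN_IRQ)
 *		...the class has been acquired in hardirq or softirq context...
 */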

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
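
/*
 * Typical use (a sketch; lockdep_set_class() is defined further below,
 * and the key and field names here are illustrative): a static key
 * placed in .data gives all lock instances initialized through it one
 * shared, distinct class:
 *
 *	static struct lock_class_key bdev_part_lock_key;
 *	...
 *	lockdep_set_class(&bdev->bd_mutex, &bdev_part_lock_key);
 */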

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain kinds of graph walking
	 * to ensure that we visit each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[4];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};
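
/*
 * Sketch of how one sample is assumed to be folded into a lock_time
 * (lock_time_inc() is illustrative and not declared in this header):
 *
 *	static void lock_time_inc(struct lock_time *lt, s64 time)
 *	{
 *		if (time > lt->max)
 *			lt->max = time;
 *		if (time < lt->min || !lt->nr)
 *			lt->min = time;
 *		lt->total += time;
 *		lt->nr++;
 *	}
 */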

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
#endif
};
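
/*
 * A lock type embeds a lockdep_map next to its architecture-specific
 * state so that every lock instance can be mapped back to its class.
 * A minimal sketch (the type and field names are illustrative):
 *
 *	struct my_lock {
 *		arch_lock_t		raw;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 */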

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency caching: on a cache hit we skip the
	 * detection passes and dependency updates. It is therefore
	 * critical for full coverage of the validator that every unique
	 * dependency path that can occur in the system gets a unique
	 * key, i.e. that hash collisions are as unlikely as possible -
	 * hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 when we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

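/*
 * A lock-init helper is expected to call lockdep_init_map() once per
 * lock instance, e.g. (a sketch modelled on the spinlock debug code;
 * my_lock and __my_lock_init are illustrative):
 *
 *	void __my_lock_init(struct my_lock *lock, const char *name,
 *			    struct lock_class_key *key)
 *	{
 *		lockdep_init_map(&lock->dep_map, name, key, 0);
 *		...initialize the architecture lock state...
 *	}
 */
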
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
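
/*
 * For example, a pseudo-lock used purely for annotation can be set up
 * statically, with its own address serving as the class key (a sketch
 * modelled on the rcu_read_lock() annotation):
 *
 *	static struct lockdep_map rcu_lock_map =
 *		STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_map);
 */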

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks causes the validator to get the scope of
 * dependencies wrong: either too broad (the class needs a split) or
 * too narrow (the class suffers from a false split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
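
/*
 * Example of a class-split (a sketch; dir_key is a made-up key): if
 * directory inodes are locked in ways that would be illegal for regular
 * file inodes, giving directories their own class avoids false positives:
 *
 *	static struct lock_class_key dir_key;
 *
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&inode->i_mutex, &dir_key);
 */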
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
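
/*
 * A locking primitive wraps its acquire/release paths with these calls,
 * e.g. (a sketch for a hypothetical exclusive lock, with full validation,
 * i.e. check == 2):
 *
 *	void my_lock(struct my_lock *lock)
 *	{
 *		lock_acquire(&lock->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		...take the lock...
 *	}
 *
 *	void my_unlock(struct my_lock *lock)
 *	{
 *		lock_release(&lock->dep_map, 0, _RET_IP_);
 *		...release the lock...
 *	}
 */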

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map);			\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
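
/*
 * LOCK_CONTENDED() tries the trylock fast path first; only if that fails
 * is the acquisition recorded as contended before blocking on the real
 * lock function. A sketch of its assumed use in a spinlock implementation:
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 *	}
 */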

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
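
/*
 * For instance (a sketch assuming two mutexes of the same class with a
 * stable parent -> child locking order):
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */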

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */