/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>

struct ww_acquire_ctx;

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
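
/*
 * Example (illustrative sketch, not part of this header): a minimal
 * lock/unlock pairing that respects the semantics above. The names
 * frob_lock, frob_count and frob_update are hypothetical.
 *
 *	static DEFINE_MUTEX(frob_lock);
 *	static int frob_count;
 *
 *	static void frob_update(void)
 *	{
 *		mutex_lock(&frob_lock);
 *		frob_count++;			// data protected by frob_lock
 *		mutex_unlock(&frob_lock);	// only the owning task may unlock
 *	}
 */
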
struct mutex {
        atomic_long_t           owner;
        spinlock_t              wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
        void                    *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
}

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
        struct list_head        list;
        struct task_struct      *task;
        struct ww_acquire_ctx   *ww_ctx;
#ifdef CONFIG_DEBUG_MUTEXES
        void                    *magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES

#define __DEBUG_MUTEX_INITIALIZER(lockname) \
        , .magic = &lockname

extern void mutex_destroy(struct mutex *lock);

#else

# define __DEBUG_MUTEX_INITIALIZER(lockname)

static inline void mutex_destroy(struct mutex *lock) {}

#endif

/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init(mutex)                                       \
do {                                                            \
        static struct lock_class_key __key;                    \
                                                                \
        __mutex_init((mutex), #mutex, &__key);                  \
} while (0)
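
/*
 * Example (sketch, not part of this header): dynamically initializing a
 * mutex embedded in a runtime-allocated object. The struct and function
 * names below are hypothetical.
 *
 *	struct frob_device {
 *		struct mutex	lock;
 *		int		state;
 *	};
 *
 *	static struct frob_device *frob_alloc(void)
 *	{
 *		struct frob_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (dev)
 *			mutex_init(&dev->lock);	// must init before first use
 *		return dev;
 *	}
 */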

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
                , .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#define __MUTEX_INITIALIZER(lockname) \
                { .owner = ATOMIC_LONG_INIT(0) \
                , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname) \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
        struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
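
/*
 * Example (sketch, not part of this header): a mutex with static storage
 * duration is normally defined with DEFINE_MUTEX(); __MUTEX_INITIALIZER()
 * is what that expands to, and may be used directly when the mutex is a
 * member of a statically initialized aggregate. The names are hypothetical.
 *
 *	static DEFINE_MUTEX(frob_global_lock);
 *
 *	static struct frob_ctx {
 *		struct mutex lock;
 *	} frob_ctx = {
 *		.lock = __MUTEX_INITIALIZER(frob_ctx.lock),
 *	};
 */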

extern void __mutex_init(struct mutex *lock, const char *name,
                         struct lock_class_key *key);

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool mutex_is_locked(struct mutex *lock)
{
        return __mutex_owner(lock) != NULL;
}
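
/*
 * Example (sketch, not part of this header): mutex_is_locked() is typically
 * used in sanity checks on functions that require the caller to hold the
 * lock. The function and field names below are hypothetical.
 *
 *	static void frob_update_locked(struct frob_device *dev)
 *	{
 *		WARN_ON(!mutex_is_locked(&dev->lock));	// caller must hold it
 *		dev->state++;
 *	}
 */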

/*
 * See kernel/locking/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/locking/mutex-design.txt.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
                                        unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
                                        unsigned int subclass);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)

#define mutex_lock_nest_lock(lock, nest_lock)                           \
do {                                                                    \
        typecheck(struct lockdep_map *, &(nest_lock)->dep_map);        \
        _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);            \
} while (0)

#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
extern void mutex_lock_io(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
#endif
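
/*
 * Example (sketch, not part of this header): when two mutexes of the same
 * lock class must legitimately be held at once (here a hypothetical
 * parent/child pair), mutex_lock_nested() with a distinct subclass tells
 * lockdep this is not a self-deadlock. The names are hypothetical and the
 * caller must still guarantee a consistent locking order.
 *
 *	static void frob_reparent(struct frob_device *parent,
 *				  struct frob_device *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		// ... move state from child to parent ...
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */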

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);

extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
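
/*
 * Example (sketch, not part of this header): mutex_trylock() returns 1 on
 * success, like spin_trylock(), so it reads naturally in an if(). The
 * function and field names below are hypothetical.
 *
 *	static bool frob_poll(struct frob_device *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return false;		// contended, try again later
 *		dev->state++;
 *		mutex_unlock(&dev->lock);
 *		return true;
 *	}
 *
 * atomic_dec_and_mutex_lock() drops a reference and returns with the mutex
 * held only when the count reached zero, which suits last-put teardown paths.
 */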

/*
 * These values are chosen such that FAIL and SUCCESS match the
 * values of the regular mutex_trylock().
 */
enum mutex_trylock_recursive_enum {
        MUTEX_TRYLOCK_FAILED    = 0,
        MUTEX_TRYLOCK_SUCCESS   = 1,
        MUTEX_TRYLOCK_RECURSIVE,
};

/**
 * mutex_trylock_recursive - trylock variant that allows recursive locking
 * @lock: mutex to be locked
 *
 * This function should not be used, _ever_. It is purely for hysterical GEM
 * raisins, and once those are gone this will be removed.
 *
 * Returns:
 *  - MUTEX_TRYLOCK_FAILED    - trylock failed,
 *  - MUTEX_TRYLOCK_SUCCESS   - lock acquired,
 *  - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
 */
static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
        if (unlikely(__mutex_owner(lock) == current))
                return MUTEX_TRYLOCK_RECURSIVE;

        return mutex_trylock(lock);
}

#endif /* __LINUX_MUTEX_H */