/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#include <linux/delay.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define  __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

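/*
 * Illustrative initialization sketch (editor's addition, not part of the
 * original file): DEFINE_MUTEX() covers statically allocated mutexes, while
 * mutex_init() expands to __mutex_init() with a static lockdep key for each
 * dynamically initialized one. The example_* names below are hypothetical.
 */
static DEFINE_MUTEX(example_registry_lock);

struct example_ctx {
	struct mutex		lock;
	struct list_head	node;
};

static void __maybe_unused example_ctx_setup(struct example_ctx *ctx)
{
	mutex_init(&ctx->lock);			/* dynamic init, never memset() */
	INIT_LIST_HEAD(&ctx->node);

	mutex_lock(&example_registry_lock);	/* static mutex is ready to use as-is */
	/* ... add ctx to some global registry ... */
	mutex_unlock(&example_registry_lock);
}
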
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

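/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a plain sleeping critical section around some private state.
 * struct example_state and its fields are hypothetical.
 */
struct example_state {
	struct mutex	lock;
	unsigned long	counter;
};

static void __maybe_unused example_state_bump(struct example_state *s)
{
	mutex_lock(&s->lock);		/* may sleep, so never from IRQ context */
	s->counter++;			/* exclusive access to the state */
	mutex_unlock(&s->lock);		/* released by the same task that locked it */
}
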
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock in the fastpath, or after losing out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring the lock in the slowpath, set the ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * if lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet or the mutex has been released.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take an
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed, the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted the owner
		 * between it acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();

		/*
		 * On ARM systems, we must slow down the waiter's repeated
		 * acquisition of spin_mlock and atomics on the lock count, or
		 * we risk starving out a thread attempting to release the
		 * mutex. The mutex slowpath release must take the spin lock
		 * wait_lock. This spin lock can share a monitor with the
		 * other waiter atomics in the mutex data structure, so we
		 * must take care to rate-limit the waiters.
		 */
		udelay(1);
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
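
/*
 * Illustrative wait/wound usage sketch (editor's addition, not part of the
 * original file): acquire two ww_mutexes under one acquire context and back
 * off on -EDEADLK as the ww_mutex API requires. All example_* names are
 * hypothetical.
 */
static DEFINE_WW_CLASS(example_ww_class);

static void __maybe_unused example_lock_pair(struct ww_mutex *a,
					     struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *held, *want, *tmp;
	int err;

	ww_acquire_init(&ctx, &example_ww_class);

	/* the first lock of a fresh context just blocks, it never backs off */
	err = ww_mutex_lock(a, &ctx);
	held = a;
	want = b;

	while ((err = ww_mutex_lock(want, &ctx)) == -EDEADLK) {
		/*
		 * An older context holds 'want': drop what we hold, sleep
		 * until 'want' becomes available, then retry the other lock.
		 */
		ww_mutex_unlock(held);
		ww_mutex_lock_slow(want, &ctx);
		tmp = held;
		held = want;
		want = tmp;
	}

	ww_acquire_done(&ctx);		/* no further locks will be taken */

	/* ... both 'a' and 'b' are now locked, do the work ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}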

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) &&
	    (atomic_xchg_acquire(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg_acquire(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
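
/*
 * Illustrative lockdep-annotation sketch (editor's addition, not part of
 * the original file): taking two locks of the same lock class in a fixed
 * order, and using the subclass argument so lockdep does not flag the
 * nesting as a self-deadlock. struct example_acct and example_transfer()
 * are hypothetical.
 */
struct example_acct {
	struct mutex	lock;
	long		balance;
};

static void __maybe_unused example_transfer(struct example_acct *from,
					    struct example_acct *to, long amount)
{
	struct example_acct *first = from < to ? from : to;
	struct example_acct *second = from < to ? to : from;

	/* a stable ordering (here: by address) avoids ABBA deadlocks */
	mutex_lock(&first->lock);
	mutex_lock_nested(&second->lock, SINGLE_DEPTH_NESTING);

	from->balance -= amount;
	to->balance += amount;

	mutex_unlock(&second->lock);
	mutex_unlock(&first->lock);
}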

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	/*
	 * As a performance measure, release the lock before doing the other
	 * wakeup-related duties that follow. This allows other tasks to
	 * acquire the lock sooner, while still handling cleanups in past
	 * unlock calls. This can be done as we do not enforce strict
	 * equivalence between the mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
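
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical syscall-facing path that stays interruptible while waiting for
 * the lock and simply propagates the error. struct example_dev and
 * example_dev_op() are hypothetical.
 */
struct example_dev {
	struct mutex	io_lock;
};

static int __maybe_unused example_dev_op(struct example_dev *dev)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->io_lock);
	if (ret)
		return ret;	/* -EINTR: a signal arrived while sleeping */

	/* ... perform the operation with io_lock held ... */

	mutex_unlock(&dev->io_lock);
	return 0;
}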

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg_acquire(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
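
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * opportunistic work that is skipped instead of blocking when the lock is
 * contended, e.g. from a best-effort housekeeping path. The function name
 * is hypothetical.
 */
static bool __maybe_unused example_try_housekeeping(struct mutex *lock)
{
	if (!mutex_trylock(lock))
		return false;		/* contended: try again later */

	/* ... do the optional work with the lock held ... */

	mutex_unlock(lock);
	return true;
}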

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
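
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * dropping a reference on an object whose list membership is protected by
 * a mutex; the list lock is only taken for the final reference. All
 * example_* names are hypothetical.
 */
struct example_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

static void __maybe_unused example_obj_put(struct example_obj *obj,
					   struct mutex *list_lock)
{
	/* returns non-zero, with list_lock held, only when we hit zero */
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, list_lock))
		return;

	list_del(&obj->node);
	mutex_unlock(list_lock);
	/* ... free obj now that it is unreachable ... */
}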