/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	return atomic_read(&lock->val);
}

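/*
 * Note that queued_spin_is_locked() tests the whole lock word, not just
 * the locked byte: any non-zero value, including bits set by waiters that
 * are still queued on the lock, is reported as "locked".
 */
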
/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to keep the lockref code from stealing
 *      the lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

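/*
 * Only the bits covered by _Q_LOCKED_MASK encode the "locked" value
 * itself; the bits above it (e.g. the pending bit and the MCS wait-queue
 * tail, see asm-generic/qspinlock_types.h) are only non-zero while other
 * CPUs are waiting. queued_spin_is_contended() below therefore clears the
 * locked bits and reports contention when any of the waiter bits are set.
 */
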
/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
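
/*
 * Note that the trylock above only attempts the cmpxchg() once a plain
 * read has seen the lock word as zero; when the lock is busy this avoids
 * pulling the cache line in exclusive state for a cmpxchg() that would
 * fail anyway.
 */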

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
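
/*
 * The cmpxchg() above is the uncontended fast path (0 -> _Q_LOCKED_VAL).
 * When it fails, the value actually observed is passed on to
 * queued_spin_lock_slowpath() (kernel/locking/qspinlock.c), which spins
 * and/or queues the CPU on an MCS-style wait queue until the lock can be
 * taken.
 */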

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
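
/*
 * An architecture that can release the lock more cheaply (e.g. with a
 * single store-release to the locked byte) may provide its own
 * queued_spin_unlock() and define the queued_spin_unlock macro before
 * including this header; the generic version above has to pair a full
 * barrier with atomic_sub() to get release semantics.
 */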

/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
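
/*
 * virt_spin_lock() is a hook for virtualized guests: an architecture may
 * override it (returning true once the lock has been taken) to bypass the
 * queued slowpath, e.g. with a simple test-and-set loop, since strict
 * queueing behaves poorly when waiting vCPUs get preempted. The stub above
 * returns false, meaning "not handled, use the normal queued path".
 */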
120
Waiman Longa33fda32015-04-24 14:56:30 -0400121/*
122 * Initializier
123 */
124#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
125
/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

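/*
 * Illustrative usage sketch (callers normally go through the arch_spin_*()
 * wrappers above via the generic spinlock API rather than calling the
 * queued_spin_*() functions directly):
 *
 *	struct qspinlock lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	queued_spin_lock(&lock);
 *	... critical section ...
 *	queued_spin_unlock(&lock);
 *
 *	if (queued_spin_trylock(&lock)) {
 *		... critical section ...
 *		queued_spin_unlock(&lock);
 *	}
 */
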
#endif /* __ASM_GENERIC_QSPINLOCK_H */