blob: af6e95d0bed6122bf9fd1e4af2bd76a4e2be62b3 [file] [log] [blame]
Linus Torvalds2f4f12e2013-09-02 11:58:20 -07001#include <linux/export.h>
2#include <linux/lockref.h>
3
#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * Lockless fast path: the spinlock and the reference count share one
 * 64-bit word (lockref->lock_count), so both can be updated with a
 * single cmpxchg64.
 *
 * The loop snapshots the whole word, and only proceeds while that
 * snapshot shows the spinlock *unlocked* -- if someone holds the lock,
 * we fall through to the caller's spin_lock() slow path instead of
 * racing with it.  CODE edits the "new" copy (and may inspect "old" to
 * bail out early); the cmpxchg then publishes "new" only if the word is
 * still exactly what we snapshotted.  On success, SUCCESS runs (it
 * normally returns from the enclosing function).
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case, so the retry re-examines current state rather than
 * re-reading lock_count separately.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

/* No sane 64-bit cmpxchg available: compile the fast path away entirely,
 * so every operation falls through to its spinlocked slow path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
48
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless increment via 64-bit cmpxchg. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held (or cmpxchg unavailable) -- take it. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
69
70/**
71 * lockref_get_not_zero - Increments count unless the count is 0
Linus Torvalds44a0cf92013-09-07 15:30:29 -070072 * @lockref: pointer to lockref structure
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070073 * Return: 1 if count updated successfully or 0 if count was zero
74 */
75int lockref_get_not_zero(struct lockref *lockref)
76{
Linus Torvaldsbc08b442013-09-02 12:12:15 -070077 int retval;
78
79 CMPXCHG_LOOP(
80 new.count++;
81 if (!old.count)
82 return 0;
83 ,
84 return 1;
85 );
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070086
87 spin_lock(&lockref->lock);
Linus Torvaldsbc08b442013-09-02 12:12:15 -070088 retval = 0;
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070089 if (lockref->count) {
90 lockref->count++;
91 retval = 1;
92 }
93 spin_unlock(&lockref->lock);
94 return retval;
95}
96EXPORT_SYMBOL(lockref_get_not_zero);
97
98/**
99 * lockref_get_or_lock - Increments count unless the count is 0
Linus Torvalds44a0cf92013-09-07 15:30:29 -0700100 * @lockref: pointer to lockref structure
Linus Torvalds2f4f12e2013-09-02 11:58:20 -0700101 * Return: 1 if count updated successfully or 0 if count was zero
102 * and we got the lock instead.
103 */
104int lockref_get_or_lock(struct lockref *lockref)
105{
Linus Torvaldsbc08b442013-09-02 12:12:15 -0700106 CMPXCHG_LOOP(
107 new.count++;
108 if (!old.count)
109 break;
110 ,
111 return 1;
112 );
113
Linus Torvalds2f4f12e2013-09-02 11:58:20 -0700114 spin_lock(&lockref->lock);
115 if (!lockref->count)
116 return 0;
117 lockref->count++;
118 spin_unlock(&lockref->lock);
119 return 1;
120}
121EXPORT_SYMBOL(lockref_get_or_lock);
122
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 *
 * On a 0 return the spinlock is deliberately left held: the caller is
 * presumably about to free the object and needs the lock for that.
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless decrement, but never drop the last reference
	 * here -- that transition must happen under the lock. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	/* Slow path: take the lock; keep holding it if we return 0. */
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
Linus Torvaldse7d33bb2013-09-07 15:49:18 -0700146
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold lockref->lock.  The magic -128 makes the count
 * negative when viewed as a signed int, which is what
 * lockref_get_not_dead() tests for; the large bias keeps it negative
 * even after a few stray increments/decrements.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
Linus Torvaldse7d33bb2013-09-07 15:49:18 -0700157
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* Fast path: lockless increment; a dead lockref shows up as a
	 * negative count when viewed signed (see lockref_mark_dead()). */
	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: retest the dead-ness under the lock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);