/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock is best understood by
 * studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however,
 * to make it fit in the 4 bytes that spinlock_t is assumed to occupy, and
 * to preserve its existing API, we must modify it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next waiter (next->locked), we compress both of these, {tail,
 * next->locked}, into a single u32 value.
 *
 * A spinlock disables recursion in its own context, and there is a limit
 * to the contexts that can nest: task, softirq, hardirq and nmi. As there
 * are at most 4 nesting levels, the level can be encoded in 2 bits, and the
 * tail can be encoded by combining this 2-bit nesting level with the cpu
 * number. With one byte for the lock value and 3 bytes for the tail, only a
 * 32-bit word is needed. Even though we only need 1 bit for the lock, we
 * extend it to a full byte to achieve better performance on architectures
 * that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on the smaller 8-bit and 16-bit data types.
 */
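
/*
 * For reference, the 32-bit lock word is laid out as described in
 * asm-generic/qspinlock_types.h; in the NR_CPUS < 16K case, where the
 * pending bit gets a full byte, that is:
 *
 *       0- 7: locked byte
 *          8: pending
 *       9-15: not used
 *      16-17: tail index (nesting level)
 *      18-31: tail cpu (+ 1)
 *
 * so, for example, a lock word of ((3 << 18) | (1 << 16) | 1) reads as
 * "locked, no pending, tail = CPU 2 at nesting level 1".
 */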

#include "mcs_spinlock.h"

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline u32 encode_tail(int cpu, int idx)
{
        u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(idx > 3);
#endif
        tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
        tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

        return tail;
}

static inline struct mcs_spinlock *decode_tail(u32 tail)
{
        int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

        return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
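
/*
 * For example, with the NR_CPUS < 16K layout (_Q_TAIL_IDX_OFFSET == 16,
 * _Q_TAIL_CPU_OFFSET == 18), encode_tail(2, 1) yields
 * ((2 + 1) << 18) | (1 << 16) == 0x000d0000, and decode_tail(0x000d0000)
 * returns per_cpu_ptr(&mcs_nodes[1], 2), i.e. CPU 2's second queue node.
 */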

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
        union {
                atomic_t val;
#ifdef __LITTLE_ENDIAN
                struct {
                        u8 locked;
                        u8 pending;
                };
                struct {
                        u16 locked_pending;
                        u16 tail;
                };
#else
                struct {
                        u16 tail;
                        u16 locked_pending;
                };
                struct {
                        u8 reserved[2];
                        u8 pending;
                        u8 locked;
                };
#endif
        };
};
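
/*
 * On a little-endian machine this union reads, from the least significant
 * byte of @val upwards: locked, pending, then the two tail bytes, with
 * locked_pending overlaying the low half-word and tail the high one.  A
 * value of 0x000d0101, say, is seen as locked == 0x01, pending == 0x01,
 * locked_pending == 0x0101 and tail == 0x000d.
 */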

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        struct __qspinlock *l = (void *)lock;

        return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
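
/*
 * With _Q_PENDING_BITS == 8 the tail code occupies exactly the upper
 * half-word of the lock word, so the half-word xchg() on l->tail above
 * swaps the tail without disturbing the locked and pending bytes; the
 * shifts by _Q_TAIL_OFFSET merely convert between the 32-bit tail code
 * and that 16-bit field.
 */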

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        u32 old, new, val = atomic_read(&lock->val);

        for (;;) {
                new = (val & _Q_LOCKED_PENDING_MASK) | tail;
                old = atomic_cmpxchg(&lock->val, val, new);
                if (old == val)
                        break;

                val = old;
        }
        return old;
}
#endif /* _Q_PENDING_BITS == 8 */
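
/*
 * The cmpxchg() loop above is the generic fallback: it preserves whatever
 * the concurrently changing locked and pending bits currently hold,
 * substitutes the new tail code for the old one, and retries until the
 * full 32-bit word swaps atomically.  It is functionally equivalent to
 * the 16-bit xchg() variant, just without relying on sub-word atomics.
 */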

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'   :
 *   queue               :         ^--'                              :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        struct mcs_spinlock *prev, *next, *node;
        u32 new, old, tail;
        int idx;

        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

        if (virt_queued_spin_lock(lock))
                return;

        /*
         * wait for in-progress pending->locked hand-overs
         *
         * 0,1,0 -> 0,0,1
         */
        if (val == _Q_PENDING_VAL) {
                while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
                        cpu_relax();
        }
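
        /*
         * Here val == _Q_PENDING_VAL means the word was in the transient
         * 0,1,0 state: the previous owner has released the lock but the
         * pending waiter has not yet performed clear_pending_set_locked(),
         * so the wait above is brief.
         */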

        /*
         * trylock || pending
         *
         * 0,0,0 -> 0,0,1 ; trylock
         * 0,0,1 -> 0,1,1 ; pending
         */
        for (;;) {
                /*
                 * If we observe any contention, queue.
                 */
                if (val & ~_Q_LOCKED_MASK)
                        goto queue;

                new = _Q_LOCKED_VAL;
                if (val == new)
                        new |= _Q_PENDING_VAL;

                old = atomic_cmpxchg(&lock->val, val, new);
                if (old == val)
                        break;

                val = old;
        }
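
        /*
         * At this point the cmpxchg() has succeeded: with the usual
         * _Q_LOCKED_VAL == 1 and _Q_PENDING_VAL == 1 << 8, either we
         * installed 0x001 over an unlocked word and now own the lock, or we
         * installed 0x101 over a locked, pending-free word and must wait for
         * the owner; the test below distinguishes the two.
         */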

        /*
         * we won the trylock
         */
        if (new == _Q_LOCKED_VAL)
                return;

        /*
         * we're pending, wait for the owner to go away.
         *
         * *,1,1 -> *,1,0
         *
         * this wait loop must be a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because not all clear_pending_set_locked()
         * implementations imply full barriers.
         */
        while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
                cpu_relax();

        /*
         * take ownership and clear the pending bit.
         *
         * *,1,0 -> *,0,1
         */
        clear_pending_set_locked(lock);
        return;

        /*
         * End of pending bit optimistic spinning and beginning of MCS
         * queuing.
         */
queue:
        node = this_cpu_ptr(&mcs_nodes[0]);
        idx = node->count++;
        tail = encode_tail(smp_processor_id(), idx);

        node += idx;
        node->locked = 0;
        node->next = NULL;

        /*
         * We touched a (possibly) cold cacheline in the per-cpu queue node;
         * attempt the trylock once more in the hope someone let go while we
         * weren't watching.
         */
        if (queued_spin_trylock(lock))
                goto release;

        /*
         * We have already touched the queueing cacheline; don't bother with
         * pending stuff.
         *
         * p,*,* -> n,*,*
         */
        old = xchg_tail(lock, tail);

        /*
         * If there was a previous node, link it and wait until reaching the
         * head of the waitqueue.
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);
                WRITE_ONCE(prev->next, node);

                arch_mcs_spin_lock_contended(&node->locked);
        }

        /*
         * we're at the head of the waitqueue, wait for the owner & pending to
         * go away.
         *
         * *,x,y -> *,0,0
         *
         * this wait loop must use a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because the set_locked() function below
         * does not imply a full barrier.
         */
        while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
                cpu_relax();

        /*
         * claim the lock:
         *
         * n,0,0 -> 0,0,1 : lock, uncontended
         * *,0,0 -> *,0,1 : lock, contended
         *
         * If the queue head is the only one in the queue (lock value == tail),
         * clear the tail code and grab the lock. Otherwise, we only need
         * to grab the lock.
         */
        for (;;) {
                if (val != tail) {
                        set_locked(lock);
                        break;
                }
                old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
                if (old == val)
                        goto release;   /* No contention */

                val = old;
        }
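
        /*
         * Only the contended case reaches this point: val != tail meant a
         * later CPU has already overwritten the tail with its own code, so
         * its node->next pointer (possibly not yet visible, since it sets
         * prev->next only after xchg_tail()) is what we spin on below before
         * handing the MCS lock on.
         */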

        /*
         * contended path; wait for next, release.
         */
        while (!(next = READ_ONCE(node->next)))
                cpu_relax();

        arch_mcs_spin_unlock_contended(&next->locked);

release:
        /*
         * release the node
         */
        this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);