#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */

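/*
 * A minimal sketch of the reordering hazard described above (get_user()
 * stands in for any potentially faulting user access):
 *
 *	arch_spin_lock(lock);		// lock->slock = 0; barrier();
 *	ret = get_user(val, uaddr);	// may fault; must stay inside
 *	arch_spin_unlock(lock);		// barrier(); lock->slock = 1;
 *
 * Without the barrier()s the compiler could hoist the user access above
 * the locking store, or sink it below the unlocking one. barrier()
 * constrains only the compiler, which is sufficient on a uniprocessor.
 */
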
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)

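/*
 * Spin until the lock is released: smp_cond_load_acquire() re-reads
 * lock->slock until the loaded value (VAL) is nonzero, i.e. until the
 * lock returns to the "1 == unlocked" state, and orders the load
 * against later accesses (ACQUIRE).
 */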
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, VAL);
}

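/*
 * Nothing to spin on: on UP no other context can be running inside the
 * lock while we execute, so "locking" just records the held state. The
 * inverted encoding means a lock that was never initialized (and so
 * still reads 0) already looks locked to the debug checks.
 */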
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}
39
40static inline void
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010041arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070042{
43 local_irq_save(flags);
44 lock->slock = 0;
Linus Torvalds386afc92013-04-09 10:48:33 -070045 barrier();
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070046}
47
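/*
 * On UP a busy lock can only mean the lock was already taken in the
 * current execution stream (e.g. recursive locking), so trylock simply
 * reports whether slock still held the "1 == unlocked" value before we
 * claimed it.
 */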
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)		({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)		do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
#define arch_spin_unlock_wait(lock)	do { barrier(); (void)(lock); } while (0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
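
/*
 * Usage sketch (illustrative; my_lock and shared_counter are
 * placeholder names): on UP a critical section such as
 *
 *	spin_lock(&my_lock);
 *	shared_counter++;
 *	spin_unlock(&my_lock);
 *
 * gets its exclusion from disabled preemption plus the compiler
 * barriers; the arch_spin_*() operations above either do debug
 * bookkeeping on lock->slock or reduce to a plain barrier().
 */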

#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#define arch_read_can_lock(lock)	(((void)(lock), 1))
#define arch_write_can_lock(lock)	(((void)(lock), 1))

#endif /* __LINUX_SPINLOCK_UP_H */