/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer acquires an uncontended lock, adding ACTIVE_WRITE_BIAS
 * leaves the count at 0xffffffff00000001.  Readers increment by 1 and
 * see a positive value when uncontended, and a negative value if there
 * are writers (and possibly readers) waiting, in which case the reader
 * goes to sleep.
 */

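/*
 * A few example count values (an illustrative sketch, derived from the
 * bias definitions below; note that the same bit pattern can arise
 * from different reader/writer mixes):
 *
 *      0x0000000000000000      unlocked
 *      0x0000000000000001      one active reader (+ACTIVE_READ_BIAS)
 *      0x0000000000000003      three active readers
 *      0xffffffff00000001      one active writer (+ACTIVE_WRITE_BIAS)
 *      0xffffffff00000000      no active lockers, a locker is waiting
 */
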
#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/intrinsics.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed long             count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};

#define RWSEM_UNLOCKED_VALUE            __IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS               __IA64_UL_CONST(0x0000000000000001)
#define RWSEM_ACTIVE_MASK               __IA64_UL_CONST(0x00000000ffffffff)
#define RWSEM_WAITING_BIAS              -__IA64_UL_CONST(0x0000000100000000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

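/*
 * Usage sketch (the name below is illustrative, not part of this
 * header): a semaphore can be defined and initialized statically,
 *
 *      static DECLARE_RWSEM(my_rwsem);
 *
 * or embedded in another object and set up at run time with
 * init_rwsem() below.
 */
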
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void
init_rwsem (struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
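/*
 * Fast path: atomically increment the count with acquire semantics.
 * ia64_fetchadd8_acq() returns the count's previous value; a negative
 * value means a writer is active or lockers are queued, so fall back
 * to the slow path and sleep.
 */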
static inline void
__down_read (struct rw_semaphore *sem)
{
        long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

        if (result < 0)
                rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 */
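/*
 * Add ACTIVE_WRITE_BIAS in a cmpxchg loop with acquire semantics.
 * The write lock was obtained uncontended only if the count was 0
 * just before the update; otherwise queue up in the slow path.
 */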
static inline void
__down_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_acq(&sem->count, old, new) != old);

        if (old != 0)
                rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 */
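/*
 * Drop our reader bias with release semantics.  ia64_fetchadd8_rel()
 * returns the pre-decrement value: if the count was negative (lockers
 * are queued) and we were the last active locker, wake a waiter.
 */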
static inline void
__up_read (struct rw_semaphore *sem)
{
        long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

        if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
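/*
 * Subtract ACTIVE_WRITE_BIAS with release semantics.  If the new
 * count still shows queued lockers (negative) and no active ones,
 * hand the lock over via rwsem_wake().
 */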
static inline void
__up_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old - RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
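/*
 * Unlike the unconditional fetchadd in __down_read(), the trylock
 * must not disturb the count when it fails, so it loops on cmpxchg
 * and only increments while the observed count is non-negative.
 */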
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
        long tmp;
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
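/*
 * A writer can only acquire the lock when it is completely idle, so
 * a single cmpxchg from RWSEM_UNLOCKED_VALUE to ACTIVE_WRITE_BIAS is
 * enough; any other value means contention.
 */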
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
        long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
                               RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
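/*
 * Turn a write hold (WAITING_BIAS + ACTIVE_BIAS) into a read hold
 * (ACTIVE_BIAS) by subtracting WAITING_BIAS with release semantics.
 * If the pre-update count was negative there may be queued lockers,
 * so rwsem_downgrade_wake() is called to wake any waiting readers.
 */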
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old - RWSEM_WAITING_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (old < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * Implement atomic add functionality.  These used to be "inline"
 * functions, but GCC v3.1 doesn't quite optimize this stuff right and
 * ends up with bad calls to fetchandadd.
 */
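/*
 * These are the hooks used by the generic slow-path code in
 * lib/rwsem.c to adjust the count on behalf of queued lockers.
 */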
#define rwsem_atomic_add(delta, sem)    atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* _ASM_IA64_RWSEM_H */