/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer acquires an uncontended lock, adding RWSEM_ACTIVE_WRITE_BIAS
 * leaves the count at 0xffffffff00000001.  Readers increment the count by 1
 * and see a positive value when the lock is uncontended, or a negative value
 * when a writer is active or writers (and possibly readers) are waiting, in
 * which case the reader goes to sleep.
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/intrinsics.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};

#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
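
/*
 * Worked example of the count encoding (illustrative values only; nothing
 * below relies on this table):
 *
 *	0x0000000000000000	unlocked, nothing queued
 *	0x0000000000000001	one active reader
 *	0x0000000000000003	three active readers
 *	0xffffffff00000001	one active writer (0 + RWSEM_ACTIVE_WRITE_BIAS)
 *
 * Waiting lockers drive the MSW further negative, so a negative count
 * always means contention.
 */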

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
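
/*
 * Illustrative usage through the generic <linux/rwsem.h> wrappers, which
 * route to the fast paths below ("example_sem" is a made-up name used only
 * for this sketch):
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);
 *	... read-side critical section, may run concurrently with readers ...
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);
 *	... write-side critical section, fully exclusive ...
 *	up_write(&example_sem);
 */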

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void
init_rwsem (struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

	if (result < 0)
		rwsem_down_read_failed(sem);
}
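
/*
 * Note on __down_read() above: ia64_fetchadd8_acq() hands back the count as
 * it was before the increment, so a negative result means a writer was
 * active or lockers were already queued and the slow path must sort it out.
 */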

/*
 * lock for writing
 */
static inline void
__down_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old + RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_acq(&sem->count, old, new) != old);

	if (old != 0)
		rwsem_down_write_failed(sem);
}
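
/*
 * Worked example for __down_write() above: if the lock is uncontended,
 * old == 0 and new == RWSEM_ACTIVE_WRITE_BIAS == 0xffffffff00000001, so the
 * cmpxchg succeeds and the slow path is skipped.  Any non-zero old value
 * means active readers, an active writer, or queued waiters, and
 * rwsem_down_write_failed() is called.
 */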

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
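
/*
 * Note on __up_read() above: the fetchadd returns the pre-decrement count,
 * so "--result" recomputes the new value.  rwsem_wake() runs only when the
 * count was negative (waiters are queued) and, after this decrement, the
 * active part has dropped to zero (we were the last active locker).
 */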

/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old - RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
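
/*
 * Note on __up_write() above: dropping RWSEM_ACTIVE_WRITE_BIAS removes both
 * the single active count and the writer's MSW contribution.  If the new
 * count is still negative with no active lockers left, waiters are queued
 * and rwsem_wake() passes the lock on.
 */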

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
	long tmp;
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
			return 1;
		}
	}
	return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
			      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old - RWSEM_WAITING_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	if (old < 0)
		rwsem_downgrade_wake(sem);
}
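
/*
 * Worked example for __downgrade_write() above: subtracting
 * RWSEM_WAITING_BIAS (i.e. adding 0x0000000100000000) turns the caller's
 * RWSEM_ACTIVE_WRITE_BIAS into a plain RWSEM_ACTIVE_READ_BIAS, so one active
 * (now read) lock remains held.  A negative old count means waiters are
 * queued, and rwsem_downgrade_wake() lets waiting readers share the lock.
 */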

/*
 * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
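
/*
 * Sketch of how these helpers get used (illustrative, not code from this
 * file): rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) atomically drops one
 * active count and returns the resulting value; the generic slow paths in
 * lib/rwsem.c make this kind of adjustment while queueing and waking
 * waiters under wait_lock.
 */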

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* _ASM_IA64_RWSEM_H */