/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))

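/*
 * Release the lock by atomically clearing the lock bit: "bclr" performs
 * a read-modify-write on the byte at &lock->slock, clearing the bits in
 * mask 1 (ie. the bit that arch_spin_lock() sets).
 */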
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
		"	bclr	1,(0,%0)	\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}

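/*
 * Try once to take the lock.  "bset" atomically sets the lock bit and
 * sets the Z flag if the bit was previously clear; the mov/clr/xor
 * sequence then converts that into the conventional trylock result:
 * 1 if the lock was acquired, 0 if it was already held.
 */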
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int ret;

	asm volatile(
		"	mov	1,%0		\n"
		"	bset	%0,(%1)		\n"
		"	bne	1f		\n"
		"	clr	%0		\n"
		"1:	xor	1,%0		\n"
		: "=d"(ret)
		: "a"(&lock->slock)
		: "memory", "cc");

	return ret;
}

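/*
 * Spin until the lock bit can be set: "bset" leaves the Z flag clear
 * while the bit is already set, so the "bne" loops straight back to
 * retry until the holder releases the lock.
 */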
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	asm volatile(
		"1:	bset	1,(0,%0)	\n"
		"	bne	1b		\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}

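/*
 * As arch_spin_lock(), but the IRQ state given in "flags" is restored
 * while we spin: if the "bset" fails, the caller's saved EPSW is
 * written back (typically re-enabling interrupts), the lock byte is
 * polled until it reads zero, and interrupts are then masked again
 * (EPSW_IE | MN10300_CLI_LEVEL; the two NOPs appear to cover the EPSW
 * update latency) before the acquisition is retried.
 */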
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	int temp;

	asm volatile(
		"1:	bset	1,(0,%2)	\n"
		"	beq	3f		\n"
		"	mov	%1,epsw		\n"
		"2:	mov	(0,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	2b		\n"
		"	mov	%3,%0		\n"
		"	mov	%0,epsw		\n"
		"	nop			\n"
		"	nop			\n"
		"	bra	1b		\n"
		"3:				\n"
		: "=&d" (temp)
		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory", "cc");
}

#ifdef __KERNEL__

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock > 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
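/*
 * The counter starts at RW_LOCK_BIAS (defined in <asm/rwlock.h>).
 * Each reader subtracts 1; a writer subtracts the whole bias, so the
 * counter only goes negative when a writer is involved, which is what
 * the fallback paths below test for.  Note that the
 * "#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT" guards keep the
 * hand-written fast paths compiled out, so the generic atomic_t
 * implementations are always used.
 */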
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_lock(rw, "__read_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (atomic_dec_return(count) < 0)
			atomic_inc(count);
	}
#endif
}

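/*
 * A write lock succeeds only if subtracting the whole bias leaves the
 * counter at exactly zero, ie. no reader and no other writer held the
 * lock; otherwise the bias is added back and the attempt is retried.
 */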
static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_lock(rw, "__write_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
			atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_inc(count);
	}
#endif
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}

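/*
 * Note that the decrement and the test below are two separate atomic
 * operations rather than one atomic_dec_return(): the test can
 * therefore race with a writer transiently subtracting the bias and
 * fail spuriously, which is acceptable behaviour for a trylock.
 */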
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

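/*
 * As arch_write_lock(), but with a single attempt: take the lock if
 * subtracting the bias leaves the counter at zero, otherwise restore
 * the bias and report failure.
 */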
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

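/*
 * Pacing hint used while spinning on a contended lock; cpu_relax() is
 * typically just a compiler barrier on CPUs without a dedicated
 * spin-wait instruction.
 */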
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */