/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

/*
 * Number of lock-acquisition attempts made before yielding the CPU.
 * Tunable at boot via the "spin_retry=" command line parameter below.
 */
int spin_retry = 1000;
16
/**
 * spin_retry= parameter
 *
 * Parses the "spin_retry=" kernel command line option and stores the
 * value in spin_retry.  Returns 1 to signal that the option was consumed.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
26
/*
 * Slow path of arch_spin_lock(): entered after the inline fast-path
 * compare-and-swap has failed.  Spins until the lock is acquired.
 *
 * The lock word (owner_cpu) is 0 when free; otherwise it holds the
 * bitwise complement of the owning CPU number (~cpu is never 0, so a
 * held lock is always non-zero).
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();	/* value stored as owner */
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		/*
		 * Busy-spin only while the lock is free or the owning
		 * virtual CPU is actually running; spinning against a
		 * preempted owner cannot make progress.
		 */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			/*
			 * NOTE(review): on LPAR the code keeps spinning and
			 * never yields — presumably because the owner CPU is
			 * always scheduled there; confirm against platform docs.
			 */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Re-read the owner and yield to it before the next try. */
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070054
/*
 * Slow path of arch_spin_lock_flags(): like arch_spin_lock_wait(), but
 * spins with the caller's saved interrupt state (flags) restored and
 * disables interrupts only around each acquisition attempt.  On success
 * the function returns with interrupts disabled, as the irqsave lock
 * contract requires.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();	/* value stored as owner */
	unsigned int owner;

	local_irq_restore(flags);	/* allow interrupts while spinning */
	while (1) {
		owner = lp->owner_cpu;
		/* Spin only if the lock is free or its owner is running. */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
				/* Attempt failed: re-enable and keep spinning. */
				local_irq_restore(flags);
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
Hisashi Hifumi894cdde2008-01-26 14:11:28 +010087
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010088int arch_spin_trylock_retry(arch_spinlock_t *lp)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070089{
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -070090 unsigned int cpu = ~smp_processor_id();
91 int count;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070092
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -070093 for (count = spin_retry; count > 0; count--) {
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010094 if (arch_spin_is_locked(lp))
Christian Ehrhardt96567162006-03-09 17:33:49 -080095 continue;
Heiko Carstens3b4beb32008-01-26 14:11:03 +010096 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070097 return 1;
98 }
99 return 0;
100}
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100101EXPORT_SYMBOL(arch_spin_trylock_retry);
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700102
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100103void arch_spin_relax(arch_spinlock_t *lock)
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -0700104{
105 unsigned int cpu = lock->owner_cpu;
Gerald Schaefer59b69782010-02-26 22:37:40 +0100106 if (cpu != 0) {
107 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
108 !smp_vcpu_scheduled(~cpu))
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400109 smp_yield_cpu(~cpu);
Gerald Schaefer59b69782010-02-26 22:37:40 +0100110 }
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -0700111}
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100112EXPORT_SYMBOL(arch_spin_relax);
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -0700113
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100114void _raw_read_lock_wait(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700115{
116 unsigned int old;
117 int count = spin_retry;
118
119 while (1) {
120 if (count-- <= 0) {
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400121 smp_yield();
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700122 count = spin_retry;
123 }
Thomas Gleixnere5931942009-12-03 20:08:46 +0100124 if (!arch_read_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800125 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700126 old = rw->lock & 0x7fffffffU;
127 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
128 return;
129 }
130}
131EXPORT_SYMBOL(_raw_read_lock_wait);
132
/*
 * Slow path of arch_read_lock_flags(): spin with the caller's saved
 * interrupt state restored, disabling interrupts around each attempt to
 * bump the 31-bit reader count.  Returns with interrupts disabled.
 *
 * NOTE(review): unlike arch_spin_lock_wait_flags(), a failed
 * compare-and-swap here does NOT restore flags, so subsequent spin
 * iterations run with interrupts disabled — verify this is intentional.
 */
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);	/* spin with interrupts enabled */
	while (1) {
		if (count-- <= 0) {
			/* Spun long enough: give other CPUs a chance. */
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		/* Low 31 bits = reader count; bit 31 set = write-locked. */
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
153
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100154int _raw_read_trylock_retry(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700155{
156 unsigned int old;
157 int count = spin_retry;
158
159 while (count-- > 0) {
Thomas Gleixnere5931942009-12-03 20:08:46 +0100160 if (!arch_read_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800161 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700162 old = rw->lock & 0x7fffffffU;
163 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
164 return 1;
165 }
166 return 0;
167}
168EXPORT_SYMBOL(_raw_read_trylock_retry);
169
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100170void _raw_write_lock_wait(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700171{
172 int count = spin_retry;
173
174 while (1) {
175 if (count-- <= 0) {
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400176 smp_yield();
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700177 count = spin_retry;
178 }
Thomas Gleixnere5931942009-12-03 20:08:46 +0100179 if (!arch_write_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800180 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700181 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
182 return;
183 }
184}
185EXPORT_SYMBOL(_raw_write_lock_wait);
186
/*
 * Slow path of arch_write_lock_flags(): spin with the caller's saved
 * interrupt state restored, disabling interrupts around each attempt to
 * set the write bit.  Returns with interrupts disabled.
 *
 * NOTE(review): as in _raw_read_lock_wait_flags(), a failed
 * compare-and-swap does not restore flags, so later spin iterations run
 * with interrupts disabled — verify this is intentional.
 */
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);	/* spin with interrupts enabled */
	while (1) {
		if (count-- <= 0) {
			/* Spun long enough: give other CPUs a chance. */
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		/* 0 -> 0x80000000: free to write-held. */
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
205
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100206int _raw_write_trylock_retry(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700207{
208 int count = spin_retry;
209
210 while (count-- > 0) {
Thomas Gleixnere5931942009-12-03 20:08:46 +0100211 if (!arch_write_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800212 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700213 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
214 return 1;
215 }
216 return 0;
217}
218EXPORT_SYMBOL(_raw_write_trylock_retry);