/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#include <asm/smp.h>
#include <asm/firmware.h>

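/*
 * Confer the rest of our time slice to the virtual processor that
 * holds the lock, so it can run and release the lock sooner.  The
 * holder's yield_count in the lppaca is even while its virtual cpu
 * is dispatched and odd while it is preempted; passing the count to
 * the hypervisor lets it ignore the request if the holder has since
 * been dispatched.
 */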
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = lppaca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
			((u64)holder_cpu << 32) | yield_count);
#ifdef CONFIG_PPC_SPLPAR
	else
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
#endif
}
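
/*
 * Illustrative sketch, not part of the original file: the spinlock
 * slow path in asm/spinlock.h invokes __spin_yield() from a loop
 * along these lines (paraphrased):
 *
 *	while (unlikely(lock->slock != 0)) {
 *		HMT_low();
 *		if (SHARED_PROCESSOR)
 *			__spin_yield(lock);
 *	}
 *	HMT_medium();
 */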

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(raw_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = lppaca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
			((u64)holder_cpu << 32) | yield_count);
#ifdef CONFIG_PPC_SPLPAR
	else
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
#endif
}
#endif

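/*
 * Spin until the lock is free, without trying to acquire it.  On a
 * shared-processor partition, donate our cycles to the current lock
 * holder while we wait.
 */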
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();
}

EXPORT_SYMBOL(arch_spin_unlock_wait);
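
/*
 * Illustrative sketch, not part of the original file: callers reach
 * this through the generic spin_unlock_wait() wrapper when they need
 * to observe a lock being free without taking it, e.g. (hypothetical
 * caller and lock name):
 *
 *	spin_unlock_wait(&task->some_lock);
 *	// any critical section that was in flight has now finished
 */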