/*
 * bitops.c: atomic operations which got too long to be inlined all over
 *	the place.
 *
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/atomic.h>

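/*
 * PA-RISC has no general atomic read-modify-write instruction (its only
 * atomic primitive is ldcw), so these operations are emulated under a
 * spinlock.  On SMP the lock is taken from this hashed array:
 * _atomic_spin_lock_irqsave() (defined in the arch headers) hashes the
 * target address to pick a lock, so operations on unrelated words can
 * proceed concurrently.
 */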
14#ifdef CONFIG_SMP
Thomas Gleixner445c8952009-12-02 19:49:50 +010015arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
Thomas Gleixneredc35bd2009-12-03 12:38:57 +010016 [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
Linus Torvalds1da177e2005-04-16 15:20:36 -070017};
18#endif
19
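/*
 * 64-bit exchange: atomically replace *ptr with x and return the old
 * value.  Interrupts are disabled while the hashed lock is held, so the
 * read-modify-write cannot be interleaved with another update.
 */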
#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif

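/*
 * 32-bit exchange.  The old value is widened to long (sign-extended on
 * 64-bit kernels, hence the XXX below); callers are expected to truncate
 * the result back to 32 bits.
 */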
unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

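/*
 * 8-bit exchange: same pattern as __xchg32(), operating on one byte.
 */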
unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

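/*
 * 64-bit compare-and-exchange: store new into *ptr only if *ptr still
 * equals old.  The previous value is returned either way, so the caller
 * can tell from (prev == old) whether the store happened.
 */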
#ifdef CONFIG_64BIT
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}
#endif

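/*
 * 32-bit compare-and-exchange, same contract as the 64-bit variant.
 */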
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}
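
/*
 * Hypothetical sketch, for illustration only: callers do not invoke the
 * helpers above directly.  A size-dispatching wrapper in the arch headers
 * is expected to route xchg()/cmpxchg() here, roughly along these lines.
 * The name __cmpxchg_sketch and the exact casts are assumptions, not the
 * real header code, so the sketch is compiled out.
 */
#if 0
static inline unsigned long __cmpxchg_sketch(volatile void *ptr,
		unsigned long old, unsigned long new, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8:
		return __cmpxchg_u64((volatile unsigned long *)ptr, old, new);
#endif
	case 4:
		return __cmpxchg_u32((volatile unsigned int *)ptr,
				     (unsigned int)old, (unsigned int)new);
	}
	return old;	/* unsupported size; real code would BUG() here */
}
#endif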