#ifndef __ASM_CRIS_SYSTEM_H
#define __ASM_CRIS_SYSTEM_H

#include <arch/system.h>

/* the switch_to macro calls resume, an asm function in entry.S which does the actual
 * task switching.
 */

extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);
#define switch_to(prev,next,last) last = resume(prev,next, \
					 (int)&((struct task_struct *)0)->thread)

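/*
 * Note: the third argument passed to resume() is the byte offset of the
 * thread member within struct task_struct, computed here with the classic
 * null-pointer cast; it is equivalent to offsetof(struct task_struct, thread).
 */
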
#define barrier() __asm__ __volatile__("": : :"memory")
#define mb() barrier()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)

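/*
 * The smp_* variants fall back to the mb()/rmb()/wmb() definitions above
 * when CONFIG_SMP is set, and to plain compiler barriers on uniprocessor
 * builds, where only local interrupts can interleave with this code.
 */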
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define iret()

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

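/*
 * A minimal usage sketch (the device access below is hypothetical): bracket
 * a timing-critical I/O sequence so the idle loop does not halt the CPU
 * while it is in progress.
 *
 *	disable_hlt();
 *	... poll or bit-bang the device ...
 *	enable_hlt();
 */
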
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
  /* since Etrax doesn't have any atomic xchg instructions, we need to disable
     irq's (if enabled) and do it with move.d's */
  unsigned long flags, temp;
  local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */
  switch (size) {
  case 1:
    *((unsigned char *)&temp) = x;
    x = *(unsigned char *)ptr;
    *(unsigned char *)ptr = *((unsigned char *)&temp);
    break;
  case 2:
    *((unsigned short *)&temp) = x;
    x = *(unsigned short *)ptr;
    *(unsigned short *)ptr = *((unsigned short *)&temp);
    break;
  case 4:
    temp = x;
    x = *(unsigned long *)ptr;
    *(unsigned long *)ptr = temp;
    break;
  }
  local_irq_restore(flags); /* restore irq enable bit */
  return x;
}
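
/*
 * Usage sketch (lock_word is a hypothetical variable; callers normally use
 * the xchg() wrapper macro rather than __xchg() directly): interrupts are
 * disabled around the load/store pair above, so the swap is atomic with
 * respect to everything running on the local CPU.
 *
 *	static unsigned long lock_word;
 *	...
 *	unsigned long old = __xchg(1, &lock_word, sizeof(lock_word));
 */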

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
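
/*
 * Usage sketch (counter is a hypothetical variable): cmpxchg_local() is
 * atomic only with respect to the local CPU, e.g. against an interrupt
 * handler updating the same location on this CPU.
 *
 *	unsigned long old = counter;
 *	if (cmpxchg_local(&counter, old, old + 1) == old)
 *		... the increment was applied ...
 */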

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#define arch_align_stack(x) (x)

void default_idle(void);

#endif