#ifndef __PARISC_SYSTEM_H
#define __PARISC_SYSTEM_H

#include <linux/config.h>
#include <asm/psw.h>

/* The program status word as bitfields.  */
struct pa_psw {
	unsigned int y:1;
	unsigned int z:1;
	unsigned int rv:2;
	unsigned int w:1;
	unsigned int e:1;
	unsigned int s:1;
	unsigned int t:1;

	unsigned int h:1;
	unsigned int l:1;
	unsigned int n:1;
	unsigned int x:1;
	unsigned int b:1;
	unsigned int c:1;
	unsigned int v:1;
	unsigned int m:1;

	unsigned int cb:8;

	unsigned int o:1;
	unsigned int g:1;
	unsigned int f:1;
	unsigned int r:1;
	unsigned int q:1;
	unsigned int p:1;
	unsigned int d:1;
	unsigned int i:1;
};

#ifdef __LP64__
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
#else
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif
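/*
 * Illustrative only, not part of the original header: ptrace-style code
 * can flip individual PSW bits in a task's saved state through this
 * overlay, e.g. clearing the recovery-counter and taken-branch bits
 * after single-stepping:
 *
 *	pa_psw(task)->r = 0;
 *	pa_psw(task)->t = 0;
 *
 * This assumes TASK_PT_PSW (from asm-offsets) locates the saved PSW in
 * the task's register frame.
 */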

struct task_struct;

extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);

#define switch_to(prev, next, last) do {			\
	(last) = _switch_to(prev, next);			\
} while(0)
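/*
 * Illustrative only: the generic scheduler expands this in
 * context_switch() roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that when the new task resumes here, "prev" names the task that
 * was actually running before the switch.
 */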

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}


/* interrupt control */
#define local_save_flags(x)	__asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
#define local_irq_disable()	__asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
#define local_irq_enable()	__asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )

#define local_irq_save(x) \
	__asm__ __volatile__("rsm %1,%0" : "=r" (x) : "i" (PSW_I) : "memory" )
#define local_irq_restore(x) \
	__asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags & PSW_I) == 0;		\
})
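/*
 * Illustrative only: the usual pattern for a short critical section on
 * the local CPU. rsm returns the old PSW bits while clearing PSW_I, so
 * the sequence nests safely:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch state shared with this CPU's interrupt handlers ...
 *	local_irq_restore(flags);
 */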

#define mfctl(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfctl " #reg ",%0" :	\
		 "=r" (cr)		\
	);				\
	cr;				\
})

#define mtctl(gr, cr) \
	__asm__ __volatile__("mtctl %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")
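/*
 * Illustrative only: "reg" is pasted into the instruction string, so
 * callers pass the control register number as a literal. Reading the
 * interval timer (CR16), for instance:
 *
 *	unsigned long now = mfctl(16);
 *
 * mtctl() likewise requires a compile-time constant register number
 * because of the "i" constraint.
 */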

/* These are here to demystify the calling code, and to provide hooks */
/* which I needed for debugging EIEM problems -PB */
#define get_eiem() mfctl(15)
static inline void set_eiem(unsigned long val)
{
	mtctl(val, 15);
}
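/*
 * Illustrative only: temporarily masking one external interrupt by
 * clearing its bit in the EIEM (CR15), then restoring the old mask
 * ("bit" is a hypothetical name for the caller's EIEM bit):
 *
 *	unsigned long eiem = get_eiem();
 *
 *	set_eiem(eiem & ~bit);
 *	... the interrupt source is now masked ...
 *	set_eiem(eiem);
 */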

#define mfsp(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfsp " #reg ",%0" :	\
		 "=r" (cr)		\
	);				\
	cr;				\
})

#define mtsp(gr, cr) \
	__asm__ __volatile__("mtsp %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")
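/*
 * Illustrative only: mfsp() stringifies its argument while mtsp() takes
 * the space register as an immediate, so installing a new space id into
 * sr3 looks roughly like
 *
 *	mtsp(context, 3);
 *
 * (modelled on how the mmu context code loads a space register; treat
 * the exact call shape as an assumption, not a spec).
 */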


/*
** This is simply the barrier() macro from linux/kernel.h. When serial.c
** pulls in tqueue.h, which uses smp_mb() (defined in terms of barrier()),
** linux/kernel.h has not been included yet, so the build fails; hence the
** macro is repeated here.
**
** The PA-RISC architecture allows for weakly ordered memory accesses,
** although none of the processors use it. There is a strong-ordering bit,
** the O-bit, in the page directory entry. Operating systems that cannot
** tolerate out-of-order accesses should set this bit when mapping pages.
** The O-bit of the PSW should also be set to 1 (I don't believe any of
** the processors implemented the PSW O-bit). The PCX-W ERS states that
** the TLB O-bit is not implemented, so the page directory does not need
** to have the O-bit set when mapping pages (section 3.1). That section
** also states that the PSW Y, Z, G, and O bits are not implemented.
** So it looks like nothing needs to be done for parisc-linux (yet).
** (thanks to chada for the above comment -ggg)
**
** The __asm__ op below simply prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
#define rmb()		mb()
#define wmb()		mb()
#define smp_mb()	mb()
#define smp_rmb()	mb()
#define smp_wmb()	mb()
#define smp_read_barrier_depends()	do { } while(0)
#define read_barrier_depends()		do { } while(0)

#define set_mb(var, value)		do { var = value; mb(); } while (0)
#define set_wmb(var, value)		do { var = value; wmb(); } while (0)
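/*
 * Illustrative only: the producer/consumer shape these barriers exist
 * for, even though they all compile down to a compiler barrier here
 * (shipping PA-RISC implementations are strongly ordered):
 *
 *	writer:			reader:
 *	data = 42;		while (!ready)
 *	smp_wmb();			;
 *	ready = 1;		smp_rmb();
 *				use(data);
 */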


#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */

#define __PA_LDCW_ALIGNMENT 16
#define __ldcw_align(a) ({					\
	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
	(volatile unsigned int *) __ret;			\
})
#define LDCW	"ldcw"

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then the
   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
   they only require "natural" alignment (4-byte for ldcw, 8-byte for
   ldcd). */

#define __PA_LDCW_ALIGNMENT 4
#define __ldcw_align(a) ((volatile unsigned int *)a)
#define LDCW	"ldcw,co"

#endif /*!CONFIG_PA20*/

/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
#define __ldcw(a) ({ \
	unsigned __ret; \
	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
	__ret; \
})
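/*
 * Illustrative only: ldcw zeroes the word and returns its previous
 * contents, so a nonzero return means the lock was free and is now
 * ours. The arch spinlock is built on it roughly like this (lock->lock
 * being the four-int array described above):
 *
 *	volatile unsigned int *a = __ldcw_align(lock);
 *
 *	while (__ldcw(a) == 0)
 *		while (*a == 0)
 *			cpu_relax();	... spin read-only until free ...
 *
 * Unlock is then a plain store of 1 back through *a.
 */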

#ifdef CONFIG_SMP
# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
#endif

#define KERNEL_START (0x10100000 - 0x1000)
#define arch_align_stack(x) (x)

#endif