#ifndef __PARISC_SYSTEM_H
#define __PARISC_SYSTEM_H

#include <asm/psw.h>

/* The program status word as bitfields.  */
struct pa_psw {
	unsigned int y:1;
	unsigned int z:1;
	unsigned int rv:2;
	unsigned int w:1;
	unsigned int e:1;
	unsigned int s:1;
	unsigned int t:1;

	unsigned int h:1;
	unsigned int l:1;
	unsigned int n:1;
	unsigned int x:1;
	unsigned int b:1;
	unsigned int c:1;
	unsigned int v:1;
	unsigned int m:1;

	unsigned int cb:8;

	unsigned int o:1;
	unsigned int g:1;
	unsigned int f:1;
	unsigned int r:1;
	unsigned int q:1;
	unsigned int p:1;
	unsigned int d:1;
	unsigned int i:1;
};

#ifdef CONFIG_64BIT
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
#else
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif

struct task_struct;
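
/*
 * Usage sketch (illustrative, not part of the original header): because
 * pa_psw() exposes a task's saved PSW as the bitfield struct above,
 * individual flags can be read or cleared directly.  The helper name is
 * hypothetical, and like the macro itself it assumes TASK_PT_PSW from
 * asm-offsets is visible; the ptrace code does something similar when
 * it turns single-stepping off.
 */
static inline void __example_clear_step_bits(struct task_struct *task)
{
	pa_psw(task)->r = 0;	/* recovery counter enable */
	pa_psw(task)->t = 0;	/* taken-branch trap enable */
}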

extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);

#define switch_to(prev, next, last) do {			\
	(last) = _switch_to(prev, next);			\
} while(0)
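
/*
 * Sketch (illustrative only): the generic scheduler is expected to call
 * switch_to() roughly as below.  Passing "prev" as the "last" argument
 * means that, once this CPU resumes the caller, "prev" names the task
 * that was actually running before the switch.  The helper name is
 * hypothetical.
 */
static inline struct task_struct *
__example_context_switch(struct task_struct *prev, struct task_struct *next)
{
	switch_to(prev, next, prev);
	return prev;	/* the task we switched away from */
}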

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}


/* interrupt control */
#define local_save_flags(x)	__asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
#define local_irq_disable()	__asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
#define local_irq_enable()	__asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )

#define local_irq_save(x) \
	__asm__ __volatile__("rsm %1,%0" : "=r" (x) : "i" (PSW_I) : "memory" )
#define local_irq_restore(x) \
	__asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags & PSW_I) == 0;		\
})
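
/*
 * Usage sketch (illustrative, not part of the original API): a typical
 * critical section saves the PSW I-bit state, disables interrupts, and
 * restores the saved state on exit so that nested sections still work.
 * The helper name is hypothetical.
 */
static inline void __example_irq_safe_increment(volatile int *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* rsm PSW_I, old PSW into flags */
	(*counter)++;			/* no interrupts on this CPU here */
	local_irq_restore(flags);	/* mtsm: put the old I-bit back */
}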

#define mfctl(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfctl " #reg ",%0" :	\
		"=r" (cr)		\
	);				\
	cr;				\
})
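
/*
 * Usage sketch (illustrative): control register 16 is the PA-RISC
 * interval timer, so a cycle-counter read is a single mfctl.  The
 * helper name is hypothetical; get_cycles() is built the same way.
 */
static inline unsigned long __example_read_cycles(void)
{
	return mfctl(16);	/* expands to "mfctl 16,%0" */
}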

#define mtctl(gr, cr) \
	__asm__ __volatile__("mtctl %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")

/* These are here to demystify the calling code, and to provide hooks
 * which I needed for debugging EIEM problems.  -PB */
#define get_eiem() mfctl(15)
static inline void set_eiem(unsigned long val)
{
	mtctl(val, 15);
}
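
/*
 * Sketch (illustrative): disabling one external interrupt source by
 * clearing its bit in the EIEM (cr15).  The bit-numbering convention
 * here is hypothetical; the real IRQ code derives EIEM bits from the
 * CPU's interrupt layout.
 */
static inline void __example_mask_external_irq(unsigned int bit)
{
	set_eiem(get_eiem() & ~(1UL << bit));
}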

#define mfsp(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfsp " #reg ",%0" :	\
		"=r" (cr)		\
	);				\
	cr;				\
})

#define mtsp(gr, cr) \
	__asm__ __volatile__("mtsp %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")


/*
** This is simply the barrier() macro from linux/kernel.h, repeated here
** because when serial.c pulls in tqueue.h, which uses smp_mb() (defined
** using barrier()), linux/kernel.h has not been included yet, so the
** build fails without a local copy.
**
** The PA-RISC architecture allows for weakly ordered memory accesses,
** although none of the processors use it.  There is a strongly-ordered
** bit that is set in the O-bit of the page directory entry.  Operating
** systems that cannot tolerate out-of-order accesses should set this
** bit when mapping pages.  The O-bit of the PSW should also be set to 1
** (I don't believe any of the processors implemented the PSW O-bit).
** The PCX-W ERS states that the TLB O-bit is not implemented, so the
** page directory does not need to have the O-bit set when mapping
** pages (section 3.1).  This section also states that the PSW Y, Z, G,
** and O bits are not implemented.
** So it looks like nothing needs to be done for parisc-linux (yet).
** (thanks to chada for the above comment -ggg)
**
** The __asm__ op below simply prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
#define rmb()		mb()
#define wmb()		mb()
#define smp_mb()	mb()
#define smp_rmb()	mb()
#define smp_wmb()	mb()
#define smp_read_barrier_depends()	do { } while(0)
#define read_barrier_depends()		do { } while(0)

#define set_mb(var, value)		do { var = value; mb(); } while (0)
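
/*
 * Usage sketch (illustrative): the classic publish/consume pairing.
 * The producer must make its data visible before the ready flag, and
 * the consumer must read the flag before the data.  On PA-RISC, which
 * runs strongly ordered, both barriers compile to plain barrier()s.
 * The helper names are hypothetical.
 */
static inline void __example_publish(int *data, volatile int *flag, int val)
{
	*data = val;
	smp_wmb();	/* order the data store before the flag store */
	*flag = 1;
}

static inline int __example_consume(int *data, volatile int *flag, int *out)
{
	if (!*flag)
		return 0;	/* not published yet */
	smp_rmb();	/* order the flag load before the data load */
	*out = *data;
	return 1;
}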

#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute__((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */

#define __PA_LDCW_ALIGNMENT	16
#define __ldcw_align(a) ({					\
	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
		& ~(__PA_LDCW_ALIGNMENT - 1);			\
	(volatile unsigned int *) __ret;			\
})
#define __LDCW	"ldcw"

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then
   the 16-byte alignment requirement for ldcw and ldcd is relaxed, and
   instead they only require "natural" alignment (4-byte for ldcw,
   8-byte for ldcd).  */

#define __PA_LDCW_ALIGNMENT	4
#define __ldcw_align(a) ((volatile unsigned int *)a)
#define __LDCW	"ldcw,co"

#endif /*!CONFIG_PA20*/

/* LDCW, the only atomic read-write operation PA-RISC has.  *sigh*.  */
#define __ldcw(a) ({						\
	unsigned __ret;						\
	__asm__ __volatile__(__LDCW " 0(%1),%0"			\
		: "=r" (__ret) : "r" (a));			\
	__ret;							\
})
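
/*
 * Sketch (illustrative): how a spinlock can be built on
 * __ldcw_align()/__ldcw().  ldcw atomically loads the word and clears
 * it, so reading 0 means the lock was already held; releasing stores a
 * non-zero value back.  The struct and helper names are hypothetical
 * stand-ins for the real arch spinlock code.
 */
struct __example_ldcw_lock {
	volatile unsigned int lock[4];	/* room to find a 16-byte slot */
};

static inline void __example_spin_lock(struct __example_ldcw_lock *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	while (__ldcw(a) == 0)		/* got 0: someone else holds it */
		while (*a == 0)		/* spin read-only until released */
			;
	mb();				/* keep the critical section below */
}

static inline void __example_spin_unlock(struct __example_ldcw_lock *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();				/* keep the critical section above */
	*a = 1;				/* any non-zero value frees the lock */
}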

#ifdef CONFIG_SMP
# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
#endif

#define KERNEL_START (0x10100000 - 0x1000)
#define arch_align_stack(x) (x)

#endif