/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/processor.h"
 *    Copyright (C) 1994, Linus Torvalds
 */

#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H

#include <linux/const.h>

#define CIF_MCCK_PENDING    0   /* machine check handling is pending */
#define CIF_ASCE            1   /* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY      2   /* delay HZ disable for a tick */
#define CIF_FPU             3   /* restore FPU registers */
#define CIF_IGNORE_IRQ      4   /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT    5   /* in enabled wait state */

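/* The CIF_* bits as masks; _BITUL() keeps them usable from both C and assembler code. */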
#define _CIF_MCCK_PENDING   _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE           _BITUL(CIF_ASCE)
#define _CIF_NOHZ_DELAY     _BITUL(CIF_NOHZ_DELAY)
#define _CIF_FPU            _BITUL(CIF_FPU)
#define _CIF_IGNORE_IRQ     _BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT   _BITUL(CIF_ENABLED_WAIT)

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/fpu/types.h>
#include <asm/fpu/internal.h>

static inline void set_cpu_flag(int flag)
{
        S390_lowcore.cpu_flags |= (1UL << flag);
}

static inline void clear_cpu_flag(int flag)
{
        S390_lowcore.cpu_flags &= ~(1UL << flag);
}

static inline int test_cpu_flag(int flag)
{
        return !!(S390_lowcore.cpu_flags & (1UL << flag));
}

/*
 * Test CIF flag of another CPU. The caller needs to ensure that
 * CPU hotplug cannot happen, e.g. by disabling preemption.
 */
static inline int test_cpu_flag_of(int flag, int cpu)
{
        struct lowcore *lc = lowcore_ptr[cpu];
        return !!(lc->cpu_flags & (1UL << flag));
}

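/*
 * Illustrative example: interrupt code that wants the timer tick to stay
 * enabled for one more tick can call set_cpu_flag(CIF_NOHZ_DELAY);
 * arch_needs_cpu() below simply tests that flag.
 */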
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })

static inline void get_cpu_id(struct cpuid *ptr)
{
        asm volatile("stidp %0" : "=Q" (*ptr));
}

void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);

extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);

/*
 * User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
 */

#define TASK_SIZE_OF(tsk)       ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE      (test_thread_flag(TIF_31BIT) ? \
                                 (1UL << 30) : (1UL << 41))
#define TASK_SIZE               TASK_SIZE_OF(current)
#define TASK_MAX_SIZE           (1UL << 53)

#define STACK_TOP               (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX           (1UL << 42)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

typedef struct {
        __u32 ar4;
} mm_segment_t;

/*
 * Thread structure
 */
struct thread_struct {
        unsigned int acrs[NUM_ACRS];    /* access registers */
        unsigned long ksp;              /* kernel stack pointer */
        mm_segment_t mm_segment;
        unsigned long gmap_addr;        /* address of last gmap fault. */
        unsigned int gmap_write_flag;   /* gmap fault write indication */
        unsigned int gmap_int_code;     /* int code of last gmap fault */
        unsigned int gmap_pfault;       /* signal of a pending guest pfault */
        struct per_regs per_user;       /* User specified PER registers */
        struct per_event per_event;     /* Cause of the last PER trap */
        unsigned long per_flags;        /* Flags to control debug behavior */
        /* pfault_wait is used to block the process on a pfault event */
        unsigned long pfault_wait;
        struct list_head list;
        /* cpu runtime instrumentation */
        struct runtime_instr_cb *ri_cb;
        unsigned char trap_tdb[256];    /* Transaction abort diagnose block */
        /*
         * Warning: 'fpu' is dynamically-sized. It *MUST* be at
         * the end.
         */
        struct fpu fpu;                 /* FP and VX register save area */
};

/* Flag to disable transactions. */
#define PER_FLAG_NO_TE                  1UL
/* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND          2UL
/*
 * Flag to specify random transaction abort mode:
 * - abort each transaction at a random instruction before TEND if set.
 * - abort random transactions at a random instruction if cleared.
 */
#define PER_FLAG_TE_ABORT_RAND_TEND     4UL

typedef struct thread_struct thread_struct;

/*
 * Stack layout of a C stack frame.
 */
#ifndef __PACK_STACK
struct stack_frame {
        unsigned long back_chain;
        unsigned long empty1[5];
        unsigned long gprs[10];
        unsigned int empty2[8];
};
#else
struct stack_frame {
        unsigned long empty1[5];
        unsigned int empty2[8];
        unsigned long gprs[10];
        unsigned long back_chain;
};
#endif

#define ARCH_MIN_TASKALIGN      8

#define INIT_THREAD {                                                   \
        .ksp = sizeof(init_stack) + (unsigned long) &init_stack,        \
        .fpu.regs = (void *) init_task.thread.fpu.fprs,                 \
}

/*
 * Do necessary setup to start up a new thread.
 */
#define start_thread(regs, new_psw, new_stackp) do {                    \
        regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;     \
        regs->psw.addr = new_psw;                                       \
        regs->gprs[15] = new_stackp;                                    \
        execve_tail();                                                  \
} while (0)

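/*
 * 31-bit variant of start_thread(): the new PSW runs in 31-bit addressing
 * mode and the process address space is downgraded accordingly.
 */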
#define start_thread31(regs, new_psw, new_stackp) do {                  \
        regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA;                   \
        regs->psw.addr = new_psw;                                       \
        regs->gprs[15] = new_stackp;                                    \
        crst_table_downgrade(current->mm);                              \
        execve_tail();                                                  \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct seq_file;

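/*
 * Walk the kernel stack of @task starting at @sp and call @func for each
 * entry found; @reliable tells the callback whether the address could be
 * determined reliably.
 */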
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
                struct task_struct *task, unsigned long sp);

void show_cacheinfo(struct seq_file *m);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/*
 * Return saved PC of a blocked thread.
 */
extern unsigned long thread_saved_pc(struct task_struct *t);

unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
        (task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->gprs[15])

/* Does the task have runtime instrumentation enabled? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)

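/* Return the current value of the stack pointer (general register 15). */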
static inline unsigned long current_stack_pointer(void)
{
        unsigned long sp;

        asm volatile("la %0,0(15)" : "=a" (sp));
        return sp;
}

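/* Store the CPU address of the current CPU (stap instruction). */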
static inline unsigned short stap(void)
{
        unsigned short cpu_address;

        asm volatile("stap %0" : "=m" (cpu_address));
        return cpu_address;
}

/*
 * Give up the time slice of the virtual PU.
 */
void cpu_relax(void);

#define cpu_relax_lowlatency()  barrier()

#define ECAG_CACHE_ATTRIBUTE    0
#define ECAG_CPU_ATTRIBUTE      1

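/*
 * Extract a CPU or cache attribute via the ecag instruction; asi and parm
 * select which attribute is queried (see the ECAG_* selectors above).
 */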
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
        unsigned long val;

        asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
                     : "=d" (val) : "a" (asi << 8 | parm));
        return val;
}

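/* Set the access key in the PSW (spka instruction). */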
static inline void psw_set_key(unsigned int key)
{
        asm volatile("spka 0(%0)" : : "d" (key));
}

/*
 * Set PSW to specified value.
 */
static inline void __load_psw(psw_t psw)
{
        asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}

/*
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction.
 */
static inline void __load_psw_mask(unsigned long mask)
{
        unsigned long addr;
        psw_t psw;

        psw.mask = mask;

        asm volatile(
                "       larl    %0,1f\n"
                "       stg     %0,%O1+8(%R1)\n"
                "       lpswe   %1\n"
                "1:"
                : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
}

/*
 * Extract current PSW mask
 */
static inline unsigned long __extract_psw(void)
{
        unsigned int reg1, reg2;

        asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
        return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}

static inline void local_mcck_enable(void)
{
        __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
}

static inline void local_mcck_disable(void)
{
        __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
}

/*
 * Rewind PSW instruction address by specified number of bytes.
 */
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
        unsigned long mask;

        mask = (psw.mask & PSW_MASK_EA) ? -1UL :
               (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
                                          (1UL << 24) - 1;
        return (psw.addr - ilc) & mask;
}

/*
 * Function to stop a processor until the next interrupt occurs
 */
void enabled_wait(void);

/*
 * Function to drop a processor into disabled wait state
 */
static inline void __noreturn disabled_wait(unsigned long code)
{
        psw_t psw;

        psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
        psw.addr = code;
        __load_psw(psw);
        while (1);
}

/*
 * Basic Machine Check/Program Check Handler.
 */

extern void s390_base_mcck_handler(void);
extern void s390_base_pgm_handler(void);
extern void s390_base_ext_handler(void);

extern void (*s390_base_mcck_handler_fn)(void);
extern void (*s390_base_pgm_handler_fn)(void);
extern void (*s390_base_ext_handler_fn)(void);

#define ARCH_LOW_ADDRESS_LIMIT  0x7fffffffUL

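/*
 * Copy routines that operate on real and absolute addresses, i.e. bypass
 * dynamic address translation and lowcore prefixing respectively.
 */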
extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);

#define mem_assign_absolute(dest, val) {                                \
        __typeof__(dest) __tmp = (val);                                 \
                                                                        \
        BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));                     \
        memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));                \
}
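
/*
 * Illustrative usage (some_lowcore_field is a placeholder name):
 * mem_assign_absolute(S390_lowcore.some_lowcore_field, val) copies val into
 * the destination via memcpy_absolute(), so the store goes to the absolute
 * address of the field rather than the prefixed one.
 */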

#endif /* __ASSEMBLY__ */

#endif /* __ASM_S390_PROCESSOR_H */