#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2

#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#else
#define TS_FPROFFSET 1
#define TS_VSRLOWOFFSET 0
#endif

#else
#define TS_FPRWIDTH 1
#define TS_FPROFFSET 0
#endif

#ifdef CONFIG_PPC64
/* Default SMT priority is 3. The priority is kept in PPR bits 11-13. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define INIT_PPR (PPR_PRIORITY << 50)
#else
#define INIT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */
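/*
 * Worked example of the encoding above: PPR bits 11-13 (IBM bit numbering,
 * bit 0 = MSB of the 64-bit register) correspond to a left shift of 50 from
 * the least-significant bit, so INIT_PPR = 3 << 50 = 0x000C000000000000.
 */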

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>

/* We do _not_ want to define new machine types at all, those must die
 * in favor of using the device-tree
 * -- BenH.
 */

/* PREP sub-platform types. Unused */
#define _PREP_Motorola	0x01	/* motorola prep */
#define _PREP_Firm	0x02	/* firmworks prep */
#define _PREP_IBM	0x00	/* ibm prep */
#define _PREP_Bull	0x03	/* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
#define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq	0x07	/* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low()		asm volatile("or 31,31,31	# very low priority")
#define HMT_low()		asm volatile("or 1,1,1		# low priority")
#define HMT_medium_low()	asm volatile("or 6,6,6		# medium low priority")
#define HMT_medium()		asm volatile("or 2,2,2		# medium priority")
#define HMT_medium_high()	asm volatile("or 5,5,5		# medium high priority")
#define HMT_high()		asm volatile("or 3,3,3		# high priority")
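/*
 * Note: the "or Rx,Rx,Rx" forms above are architected no-ops that only hint
 * the hardware thread's SMT priority on CPUs that support it; they change no
 * register state.  The usual pattern is to drop priority while busy-waiting
 * and restore it afterwards, e.g. as cpu_relax() and spin_begin()/spin_end()
 * do further down in this file.
 */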

#ifdef __KERNEL__

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

#ifdef CONFIG_PPC32

#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
#error User TASK_SIZE overlaps with KERNEL_START address
#endif
#define TASK_SIZE	(CONFIG_TASK_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 8 * 3)
#endif

#ifdef CONFIG_PPC64
/*
 * The 64-bit user address space can have multiple limits.
 * The values currently supported are:
 */
#define TASK_SIZE_64TB  (0x0000400000000000UL)
#define TASK_SIZE_128TB (0x0000800000000000UL)
#define TASK_SIZE_512TB (0x0002000000000000UL)
#define TASK_SIZE_1PB   (0x0004000000000000UL)
#define TASK_SIZE_2PB   (0x0008000000000000UL)
/*
 * With 52 bits in the address we can support
 * up to 4PB of range.
 */
#define TASK_SIZE_4PB   (0x0010000000000000UL)
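/*
 * For reference, the limits above as powers of two:
 * 64TB = 2^46, 128TB = 2^47, 512TB = 2^49, 1PB = 2^50, 2PB = 2^51 and
 * 4PB = 2^52, which is where the 52-bit effective-address limit comes from.
 */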

/*
 * For now, the 512TB (and larger) address space is only supported with
 * Book3S and a 64K Linux page size.
 */
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Max value currently used:
 */
#define TASK_SIZE_USER64		TASK_SIZE_4PB
#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
#define TASK_CONTEXT_SIZE		TASK_SIZE_512TB
#else
#define TASK_SIZE_USER64		TASK_SIZE_64TB
#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
/*
 * We don't need to allocate extended context IDs for the 4K page size,
 * because we limit the max effective address on this config to 64TB.
 */
#define TASK_CONTEXT_SIZE		TASK_SIZE_64TB
#endif

/*
 * The 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so that referencing 0xFFFFFFFF generates EFAULT).
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))

#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_SIZE	  TASK_SIZE_OF(current)
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))

#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64)
#endif

/*
 * Initial task size value for user applications. For Book3S-64 we start
 * with 128TB and conditionally enable up to 512TB.
 */
#ifdef CONFIG_PPC_BOOK3S_64
#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
#else
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#endif
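/*
 * DEFAULT_MAP_WINDOW is the boundary the mmap code stays below by default;
 * a process only gets mappings above it (up to TASK_SIZE) when it passes an
 * explicit address hint above the window to mmap().  A rough illustration
 * from user space (the hint value is only an example):
 *
 *	// hint above 128TB, so the kernel may place this beyond the window
 *	p = mmap((void *)(1UL << 48), len, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */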

#ifdef __powerpc64__

#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32

#define STACK_TOP (is_32bit_task() ? \
		   STACK_TOP_USER32 : STACK_TOP_USER64)

#define STACK_TOP_MAX TASK_SIZE_USER64

#else /* __powerpc64__ */

#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

#endif /* __powerpc64__ */

typedef struct {
	unsigned long seg;
} mm_segment_t;
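/*
 * mm_segment_t holds the user/kernel address limit used by the uaccess
 * routines (see the addr_limit field and the KERNEL_DS initialisers below).
 * The classic, sketch-level usage pattern is:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);		// temporarily allow kernel addresses
 *	...copy_to_user()/copy_from_user() on kernel buffers...
 *	set_fs(old_fs);			// always restore the old limit
 */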

#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]

/* FP and VSX 0-31 register set */
struct thread_fp_state {
	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64	fpscr;		/* Floating point status */
};
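/*
 * Layout note: with CONFIG_VSX each fpr[i] row holds both 64-bit halves of
 * VSR i (TS_FPRWIDTH == 2), and TS_FPROFFSET selects the half that carries
 * the classic FPR value for the configured endianness; without VSX each row
 * is just the FPR itself.  Accesses typically go through the macros above,
 * e.g.:
 *
 *	tsk->thread.TS_FPR(0) = 0;	// clear FPR0 in the saved state
 */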

/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
	vector128	vr[32] __attribute__((aligned(16)));
	vector128	vscr __attribute__((aligned(16)));
};

struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of Debug Control Registers
	 * on the BookE platforms.
	 */
	uint32_t	dbcr0;
	uint32_t	dbcr1;
#ifdef CONFIG_BOOKE
	uint32_t	dbcr2;
#endif
	/*
	 * The stored value of the DBSR register is the value at the last
	 * debug interrupt.  It can only be read by user space (it is never
	 * written to) and helps describe the reason for the last debug trap.
	 * -- Torez
	 */
	uint32_t	dbsr;
	/*
	 * The following will contain addresses used by debug applications
	 * to help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above help define which
	 * of the following registers will contain valid data and/or addresses.
	 */
	unsigned long	iac1;
	unsigned long	iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long	iac3;
	unsigned long	iac4;
#endif
	unsigned long	dac1;
	unsigned long	dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long	dvc1;
	unsigned long	dvc2;
#endif
#endif
};

struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */

#ifdef CONFIG_PPC64
	unsigned long	ksp_vsid;
#endif
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	addr_limit;	/* for get_fs() validation */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long	normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void		*pgdir;		/* root of page-table tree */
	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
#endif
	/* Debug Registers */
	struct debug_reg debug;
	struct thread_fp_state	fp_state;
	struct thread_fp_state	*fp_save_area;
	int		fpexc_mode;	/* floating-point exception mode */
	unsigned int	align_ctl;	/* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
	unsigned long	trap_nr;	/* last trap # on this thread */
	u8 load_fp;
#ifdef CONFIG_ALTIVEC
	u8 load_vec;
	struct thread_vr_state vr_state;
	struct thread_vr_state *vr_save_area;
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int		used_vsr;	/* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
	u64		acc;		/* Accumulator */
	unsigned long	spefscr;	/* SPE & eFP status */
	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl
					   call or trap return */
	int		used_spe;	/* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u8 load_tm;
	u64		tm_tfhar;	/* Transaction fail handler addr */
	u64		tm_texasr;	/* Transaction exception & summary */
	u64		tm_tfiar;	/* Transaction fail instr address reg */
	struct pt_regs	ckpt_regs;	/* Checkpointed registers */

	unsigned long	tm_tar;
	unsigned long	tm_ppr;
	unsigned long	tm_dscr;

	/*
	 * Checkpointed FP and VSX 0-31 register set.
	 *
	 * When a transaction is active/signalled/scheduled etc., *regs holds
	 * the most recent (speculative) GPRs, while ckpt_regs holds the older
	 * checkpointed regs to which we roll back if the transaction aborts.
	 *
	 * ckfp_state/ckvr_state below are analogous to ckpt_regs vs. pt_regs.
	 */
	struct thread_fp_state ckfp_state; /* Checkpointed FP state */
	struct thread_vr_state ckvr_state; /* Checkpointed VR state */
	unsigned long	ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
	unsigned long	amr;
	unsigned long	iamr;
	unsigned long	uamor;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void*		kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu	*kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long	dscr;
	unsigned long	fscr;
	/*
	 * dscr_inherit indicates that the process has explicitly changed the
	 * DSCR value for itself, so the kernel will no longer use the default
	 * CPU DSCR value from the PACA on context switch.  Once set, this
	 * behaviour is also inherited by all children of the process from
	 * that point onwards.
	 */
	int		dscr_inherit;
	unsigned long	ppr;	/* used to save/restore SMT priority */
	unsigned long	tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long	tar;
	unsigned long	ebbrr;
	unsigned long	ebbhr;
	unsigned long	bescr;
	unsigned long	siar;
	unsigned long	sdar;
	unsigned long	sier;
	unsigned long	mmcr2;
	unsigned	mmcr0;

	unsigned	used_ebb;
	unsigned int	used_vas;
#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
#define INIT_SP_LIMIT \
	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT \
	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.addr_limit = KERNEL_DS, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \
}
#else
#define INIT_THREAD  { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.addr_limit = KERNEL_DS, \
	.fpexc_mode = 0, \
	.ppr = INIT_PPR, \
	.fscr = FSCR_TAR | FSCR_EBB \
}
#endif

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
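/*
 * The helpers above convert between thread.fpexc_mode (as exposed via the
 * PR_GET_FPEXC/PR_SET_FPEXC prctl()s through GET_FPEXC_CTL/SET_FPEXC_CTL)
 * and the MSR image: the shifts move MSR_FE0 into bit 1 and MSR_FE1 into
 * bit 0 of the packed mode value, and back again.
 */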

#ifdef CONFIG_PPC64
#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)

#define spin_begin()	HMT_low()

#define spin_cpu_relax()	barrier()

#define spin_cpu_yield()	spin_cpu_relax()

#define spin_end()	HMT_medium()

#define spin_until_cond(cond)					\
do {								\
	if (unlikely(!(cond))) {				\
		spin_begin();					\
		do {						\
			spin_cpu_relax();			\
		} while (!(cond));				\
		spin_end();					\
	}							\
} while (0)
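/*
 * Open-coded equivalent of spin_until_cond(), useful as a sketch when the
 * wait loop needs to do extra work per iteration (the flag name is just an
 * example):
 *
 *	spin_begin();			// drop SMT priority while polling
 *	while (!READ_ONCE(flag)) {
 *		spin_cpu_relax();	// cheap pause between polls
 *		// e.g. check for a timeout here
 *	}
 *	spin_end();			// restore medium priority
 */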

#else
#define cpu_relax()	barrier()
#endif

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)
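/*
 * dcbt ("data cache block touch") hints that a cache line will soon be
 * loaded; dcbtst ("... touch for store") hints that it will be written.
 * Both are hints only and never fault.  The explicit NULL checks make
 * prefetch(NULL) a no-op, which commonly happens with the terminating
 * next pointer in list walks, e.g.:
 *
 *	for (p = head; p; p = p->next)
 *		prefetch(p->next);	// p->next may well be NULL here
 */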

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	if (is_32)
		return sp & 0x0ffffffffUL;
	return sp;
}
#else
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return sp;
}
#endif
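/*
 * get_clean_sp() truncates the stack pointer of a 32-bit task to its low
 * 32 bits, since only the low word of r1 is meaningful in 32-bit mode; on
 * 32-bit kernels (and for 64-bit tasks) the value is passed through
 * unchanged.  A minimal sketch of the intended use:
 *
 *	unsigned long usp = get_clean_sp(regs->gpr[1], is_32bit_task());
 */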

extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;	/* set if nap mode can be used in idle loop */
extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
extern void power7_idle_type(unsigned long type);
extern unsigned long power9_idle_stop(unsigned long psscr_val);
extern unsigned long power9_offline_stop(unsigned long psscr_val);
extern void power9_idle_type(unsigned long stop_psscr_val,
			     unsigned long stop_psscr_mask);

extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_PPC64
| 529 | /* |
| 530 | * We handle most unaligned accesses in hardware. On the other hand |
| 531 | * unaligned DMA can be very expensive on some ppc64 IO chips (it does |
| 532 | * powers of 2 writes until it reaches sufficient alignment). |
| 533 | * |
| 534 | * Based on this we disable the IP header alignment in network drivers. |
| 535 | */ |
| 536 | #define NET_IP_ALIGN 0 |
| 537 | #endif |

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */