David 'Digit' Turner | bacb7c9 | 2014-04-07 18:49:00 +0200 | [diff] [blame] | 1 | /* |
| 2 | * x86 SMM helpers |
| 3 | * |
| 4 | * Copyright (c) 2003 Fabrice Bellard |
| 5 | * |
| 6 | * This library is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU Lesser General Public |
| 8 | * License as published by the Free Software Foundation; either |
| 9 | * version 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | * This library is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | * Lesser General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU Lesser General Public |
| 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
| 19 | |
| 20 | #include "cpu.h" |
| 21 | #include "helper.h" |
| 22 | |
| 23 | /* SMM support */ |
| 24 | |
| 25 | #if defined(CONFIG_USER_ONLY) |
| 26 | |
/* User-mode emulation has no SMRAM or chipset, so entering System
 * Management Mode is a no-op stub. */
void do_smm_enter(CPUArchState *env1)
{
}
| 30 | |
/* RSM (resume from SMM) is likewise a no-op stub under user-mode
 * emulation; there is no saved SMM state to restore. */
void helper_rsm(CPUX86State *env)
{
}
| 34 | |
| 35 | #else |
| 36 | |
#ifdef TARGET_X86_64
/* SMM revision identifier written into the state-save area.  Bit 17
 * (0x20000) is the flag that helper_rsm() checks before honoring a
 * relocated SMBASE; the low bits presumably encode the save-map layout
 * revision (64-bit vs. legacy 32-bit map) -- NOTE(review): confirm
 * against the Intel SDM / AMD APM SMM chapters. */
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
| 42 | |
/* Enter System Management Mode: save the complete CPU state into the
 * SMRAM state-save area at smbase + 0x8000, then reset the CPU into the
 * SMM start-up environment (flat real-mode-like segments, CS based at
 * smbase, execution resuming at EIP 0x8000).
 *
 * The state-save map offsets below are fixed by the architecture
 * (64-bit/AMD64-style map under TARGET_X86_64, legacy 32-bit map
 * otherwise) and must not be reordered or changed.
 */
void do_smm_enter(CPUArchState *env)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, ENV_GET_CPU(env), X86_DUMP_CCOP);

    /* Mark the CPU as being in SMM and let the machine/chipset react
     * (SMRAM mapping typically changes with this flag). */
    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    /* Base physical address of the state-save area inside SMRAM. */
    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* Segment registers: selector, attribute word (hidden flags packed
     * as (flags >> 8) & 0xf0ff), limit and 64-bit base, 16 bytes each. */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    /* Descriptor tables and task register. */
    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* General-purpose registers: RAX..RDI explicitly, then R8..R15 via
     * the same descending-offset formula (0x7ff8 - i * 8). */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    /* EFLAGS must be recomputed from the lazy condition-code state. */
    stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    /* Revision ID and current SMBASE, read back by helper_rsm(). */
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* Legacy 32-bit state-save map. */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* Segment registers: selectors are packed together at 0x7fa8, while
     * base/limit/flags live in two disjoint 12-byte-stride groups
     * (ES/CS/SS from 0x7f84, DS/FS/GS from 0x7f2c). */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    /* Clear arithmetic flags and DF; jump to the SMM entry point with a
     * flat CS based at smbase and all data segments flat from 0. */
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    /* Drop protection and paging (keep the other CR0 bits), clear CR4,
     * and reset DR7 to its architectural reset value. */
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
| 175 | |
/* RSM instruction: leave System Management Mode by reloading the full
 * CPU state previously saved by do_smm_enter() from the SMRAM
 * state-save area at smbase + 0x8000.
 *
 * The offsets mirror do_smm_enter() exactly; any change here must be
 * matched there.  SMBASE itself is only reloaded if the saved revision
 * ID advertises SMBASE relocation (bit 0x20000).
 */
void helper_rsm(CPUX86State *env)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    /* EFER first: segment/control reloads below depend on the mode. */
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    /* Segment registers: flags were saved as (flags >> 8) & 0xf0ff,
     * so re-expand them with << 8. */
    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    /* General-purpose registers (RAX..RDI, then R8..R15). */
    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    /* Restore arithmetic flags and DF only; other EFLAGS bits keep
     * their current values per the mask. */
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    /* CR4 before CR3 before CR0, matching the save order in reverse. */
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported: take the (32 KiB aligned) new
         * base written by the SMM handler. */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    /* Legacy 32-bit state-save map. */
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    /* Segment registers: selectors are packed at 0x7fa8, while
     * base/limit/flags live in the two 12-byte-stride groups
     * (ES/CS/SS from 0x7f84, DS/FS/GS from 0x7f2c). */
    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported: take the (32 KiB aligned) new
         * base written by the SMM handler. */
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    /* Leave SMM and let the machine/chipset remap SMRAM accordingly. */
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, ENV_GET_CPU(env), X86_DUMP_CCOP);
}
| 293 | |
| 294 | #endif /* !CONFIG_USER_ONLY */ |