// Copyright 2019 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_cpu_exception/entry.h"

#include <cstdint>
#include <cstring>

#include "pw_cpu_exception/handler.h"
#include "pw_cpu_exception_armv7m/cpu_state.h"
#include "pw_preprocessor/compiler.h"

namespace pw::cpu_exception {
namespace {

// CMSIS/Cortex-M/ARMv7 related constants.
// These values are from the ARMv7-M Architecture Reference Manual DDI 0403E.b.
// https://static.docs.arm.com/ddi0403/e/DDI0403E_B_armv7m_arm.pdf

// Masks for individual bits of CFSR. (ARMv7-M Section B3.2.15)
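// kMStkErrMask selects MSTKERR (CFSR bit 4): a MemManage fault occurred while
// stacking the exception frame. kStkErrMask selects STKERR (CFSR bit 12): a
// BusFault occurred while stacking the exception frame. Either bit set means
// the CPU failed to push a context frame on exception entry.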
constexpr uint32_t kMemFaultStart = 0x1u;
constexpr uint32_t kMStkErrMask = kMemFaultStart << 4;
constexpr uint32_t kBusFaultStart = 0x1u << 8;
constexpr uint32_t kStkErrMask = kBusFaultStart << 4;

// Bit masks for an exception return value. (ARMv7-M Section B1.5.8)
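// Bit 2 of EXC_RETURN indicates which stack pointer was in use before the
// exception (set: PSP, clear: MSP). Bit 4 is set when a basic (non-FPU) frame
// was pushed and clear when an extended frame with FPU state was pushed.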
constexpr uint32_t kExcReturnStackMask = (0x1u << 2);
constexpr uint32_t kExcReturnBasicFrameMask = (0x1u << 4);

// Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
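// CFSR: Configurable Fault Status Register
// MMFAR: MemManage Fault Address Register
// BFAR: BusFault Address Register
// ICSR: Interrupt Control and State Register
// HFSR: HardFault Status Register
// SHCSR: System Handler Control and State Register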
volatile uint32_t& arm_v7m_cfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
volatile uint32_t& arm_v7m_mmfar =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED34u);
volatile uint32_t& arm_v7m_bfar =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED38u);
volatile uint32_t& arm_v7m_icsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED04u);
volatile uint32_t& arm_v7m_hfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED2Cu);
volatile uint32_t& arm_v7m_shcsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);

// If the CPU fails to capture some registers, the captured struct members will
// be populated with this value. The only registers that this value should be
// loaded into are pc, lr, and psr when the CPU fails to push an exception
// context frame.
//
// 0xFFFFFFFF is an illegal lr value, which is why it was selected for this
// purpose. pc and psr values of 0xFFFFFFFF are dubious too, so this constant
// makes it clear that the registers weren't properly captured.
constexpr uint32_t kInvalidRegisterValue = 0xFFFFFFFF;

// Checks exc_return in the captured CPU state to determine which stack pointer
// was in use prior to entering the exception handler.
bool PspWasActive(const pw_CpuExceptionState& cpu_state) {
  return cpu_state.extended.exc_return & kExcReturnStackMask;
}

// Checks exc_return to determine if FPU state was pushed to the stack in
// addition to the base CPU context frame.
bool FpuStateWasPushed(const pw_CpuExceptionState& cpu_state) {
  return !(cpu_state.extended.exc_return & kExcReturnBasicFrameMask);
}

// If the CPU successfully pushed context on exception, copy it into cpu_state.
//
// For more information, see ARMv7-M Section B1.5.11 (derived exceptions on
// exception entry).
void CloneBaseRegistersFromPsp(pw_CpuExceptionState* cpu_state) {
  // If the CPU succeeded in pushing context to the PSP, copy it to the MSP.
  if (!(cpu_state->extended.cfsr & kStkErrMask) &&
      !(cpu_state->extended.cfsr & kMStkErrMask)) {
    // TODO(amontanez): {r0-r3,r12} are captured in pw_CpuExceptionEntry(),
    //                  so this only really needs to copy pc, lr, and psr.
    //                  Could (possibly) improve speed, but would add
    //                  marginally more complexity.
    std::memcpy(&cpu_state->base,
                reinterpret_cast<void*>(cpu_state->extended.psp),
                sizeof(ArmV7mFaultRegisters));
  } else {
    // If CPU context wasn't pushed to the stack on exception entry, we can't
    // recover psr, lr, and pc from exception time. Make these values clearly
    // invalid.
    cpu_state->base.lr = kInvalidRegisterValue;
    cpu_state->base.pc = kInvalidRegisterValue;
    cpu_state->base.psr = kInvalidRegisterValue;
  }
}

// If the CPU successfully pushed context on exception, restore it from
// cpu_state. Otherwise, don't attempt to restore state.
//
// For more information, see ARMv7-M Section B1.5.11 (derived exceptions on
// exception entry).
void RestoreBaseRegistersToPsp(pw_CpuExceptionState* cpu_state) {
  // If the CPU succeeded in pushing context to the PSP on exception entry,
  // restore the contents of cpu_state to the CPU-pushed register frame so the
  // CPU can continue. Otherwise, don't attempt to, as we'll likely end up in
  // an escalated hard fault.
  if (!(cpu_state->extended.cfsr & kStkErrMask) &&
      !(cpu_state->extended.cfsr & kMStkErrMask)) {
    std::memcpy(reinterpret_cast<void*>(cpu_state->extended.psp),
                &cpu_state->base,
                sizeof(ArmV7mFaultRegisters));
  }
}

// Determines the size of the CPU-pushed context frame.
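// On ARMv7-M, the CPU-pushed basic frame is 8 words (32 bytes: r0-r3, r12, lr,
// pc, psr), and the extended frame that includes FPU state is 26 words
// (104 bytes). Bit 9 of the stacked psr indicates whether the CPU inserted a
// 4-byte pad word to keep the stack pointer 8-byte aligned.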
uint32_t CpuContextSize(const pw_CpuExceptionState& cpu_state) {
  uint32_t cpu_context_size = sizeof(ArmV7mFaultRegisters);
  if (FpuStateWasPushed(cpu_state)) {
    cpu_context_size += sizeof(ArmV7mFaultRegistersFpu);
  }
  if (cpu_state.base.psr & kPsrExtraStackAlignBit) {
    // Account for the extra 4 bytes the processor added to keep the stack
    // pointer 8-byte aligned.
    cpu_context_size += 4;
  }

  return cpu_context_size;
}

// On exception entry, the captured Program Stack Pointer is patched to reflect
// the state at exception time. On exception return, it is restored to the
// appropriate location. This calculates the delta that is used for these patch
// operations.
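// For example, if the CPU pushed a basic frame with no alignment padding to
// the PSP, the captured psp value is 32 bytes below its exception-time value,
// so a delta of 32 is added before calling the handler and subtracted again
// before exception return.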
uint32_t CalculatePspDelta(const pw_CpuExceptionState& cpu_state) {
  // If CPU context was not pushed to program stack (because program stack
  // wasn't in use, or an error occurred when pushing context), the PSP doesn't
  // need to be shifted.
  if (!PspWasActive(cpu_state) || (cpu_state.extended.cfsr & kStkErrMask) ||
      (cpu_state.extended.cfsr & kMStkErrMask)) {
    return 0;
  }

  return CpuContextSize(cpu_state);
}

// On exception entry, the captured Main Stack Pointer is patched to reflect
// the state at exception time. On exception return, it is restored to the
// appropriate location. This calculates the delta that is used for these patch
// operations.
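// For example, if the MSP was active at exception time, the captured msp sits
// below both the CPU-pushed context frame and the handler-pushed
// ArmV7mExtraRegisters, so both sizes are added to recover its exception-time
// value.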
uint32_t CalculateMspDelta(const pw_CpuExceptionState& cpu_state) {
  if (PspWasActive(cpu_state)) {
    // TODO(amontanez): Since FPU state isn't captured at this time, we ignore
    //                  it when patching MSP. To add FPU capture support,
    //                  delete this if block as CpuContextSize() will include
    //                  FPU context size in the calculation.
    return sizeof(ArmV7mFaultRegisters) + sizeof(ArmV7mExtraRegisters);
  }

  return CpuContextSize(cpu_state) + sizeof(ArmV7mExtraRegisters);
}

}  // namespace

extern "C" {

// Collect remaining CPU state (memory mapped registers) into cpu_state, then
// call the application exception handler.
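// PW_USED keeps this function from being discarded: its only reference is from
// the inline assembly in pw_CpuExceptionEntry(), which the compiler cannot see
// as a use.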
PW_USED void pw_PackageAndHandleCpuException(pw_CpuExceptionState* cpu_state) {
  // Capture memory mapped registers.
  cpu_state->extended.cfsr = arm_v7m_cfsr;
  cpu_state->extended.mmfar = arm_v7m_mmfar;
  cpu_state->extended.bfar = arm_v7m_bfar;
  cpu_state->extended.icsr = arm_v7m_icsr;
  cpu_state->extended.hfsr = arm_v7m_hfsr;
  cpu_state->extended.shcsr = arm_v7m_shcsr;

  // The CPU may have automatically pushed state to the program stack. If it
  // did, the values can be copied into the pw_CpuExceptionState struct that is
  // passed to pw_HandleCpuException(). The cpu_state passed to the handler is
  // ALWAYS stored on the main stack (MSP).
  if (PspWasActive(*cpu_state)) {
    CloneBaseRegistersFromPsp(cpu_state);
    // If the CPU failed to push a context frame to the PSP, this delta is 0.
    cpu_state->extended.psp += CalculatePspDelta(*cpu_state);
  }

  // Patch captured stack pointers so they reflect the state at exception time.
  cpu_state->extended.msp += CalculateMspDelta(*cpu_state);

  // Call application-level exception handler.
  pw_HandleCpuException(cpu_state);

  // Restore program stack pointer so exception return can restore state if
  // needed.
  // Note: The default behavior of NOT subtracting a delta from MSP is
  //       intentional. This simplifies the assembly to pop the exception state
  //       off the main stack on exception return (since MSP currently reflects
  //       exception-time state).
  cpu_state->extended.psp -= CalculatePspDelta(*cpu_state);

  // If PSP was active and the CPU pushed a context frame, we must copy the
  // potentially modified state from cpu_state back to the PSP so the CPU can
  // resume execution with the modified values.
  if (PspWasActive(*cpu_state)) {
    // In this case, there's no need to touch the MSP as it's at the location
    // it was at before we entered the exception (effectively popping the state
    // initially pushed to the main stack).
    RestoreBaseRegistersToPsp(cpu_state);
  } else {
    // Since we're restoring context from the MSP, we DO need to adjust the MSP
    // to point to the CPU-pushed context frame so it can be properly restored.
    // No need to adjust the PSP since nothing was pushed to the program stack.
    cpu_state->extended.msp -= CpuContextSize(*cpu_state);
  }
}

// Captures faulting CPU state on the main stack (MSP), then calls the
// exception handlers.
// This function should be called immediately after an exception.
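// Note: this entry function is typically installed directly in the vector
// table as the handler for fault exceptions (e.g. HardFault, MemManage,
// BusFault, UsageFault); see the pw_cpu_exception backend documentation.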
void pw_CpuExceptionEntry(void) {
  asm volatile(
      // If PSP was in use at the time of exception, it's possible the CPU
      // wasn't able to push CPU state. To be safe, this first captures scratch
      // registers before moving forward.
      //
      // Stack flag is bit index 2 (0x4) of exc_return value stored in lr. When
      // this bit is set, the Process Stack Pointer (PSP) was in use. Otherwise,
      // the Main Stack Pointer (MSP) was in use. (See ARMv7-M Section B1.5.8
      // for more details)
      // The following block of assembly is equivalent to:
      //   if (lr & (1 << 2)) {
      //     msp -= sizeof(ArmV7mFaultRegisters);
      //     ArmV7mFaultRegisters* state = (ArmV7mFaultRegisters*) msp;
      //     state->r0 = r0;
      //     state->r1 = r1;
      //     state->r2 = r2;
      //     state->r3 = r3;
      //     state->r12 = r12;
      //   }
      //
      " tst lr, #(1 << 2) \n"
      " itt ne \n"
      " subne sp, sp, %[base_state_size] \n"
      " stmne sp, {r0-r3, r12} \n"

      // Reserve stack space for additional registers. Since we're in exception
      // handler mode, the main stack pointer is currently in use.
      // r0 will temporarily store the end of captured_cpu_state to simplify
      // assembly for copying additional registers.
      " mrs r0, msp \n"
      " sub sp, sp, %[extra_state_size] \n"

      // Store GPRs to stack.
      " stmdb r0!, {r4-r11} \n"

      // Load special registers.
      " mov r1, lr \n"
      " mrs r2, msp \n"
      " mrs r3, psp \n"
      " mrs r4, control \n"

      // Store special registers to stack.
      " stmdb r0!, {r1-r4} \n"

      // Store a pointer to the beginning of special registers in r4 so they
      // can be restored later.
      " mov r4, r0 \n"

      // Restore captured_cpu_state pointer to r0. This makes adding more
      // memory mapped registers easier in the future since they're skipped in
      // this assembly.
      " mrs r0, msp \n"

      // Call intermediate handler that packages data.
      " ldr r3, =pw_PackageAndHandleCpuException \n"
      " blx r3 \n"

      // Restore state and exit exception handler.
      // Pointer to saved CPU state was stored in r4.
      " mov r0, r4 \n"

      // Restore special registers.
      " ldm r0!, {r1-r4} \n"
      " mov lr, r1 \n"
      " msr control, r4 \n"

      // Restore GPRs.
      " ldm r0, {r4-r11} \n"

      // Restore stack pointers.
      " msr msp, r2 \n"
      " msr psp, r3 \n"

      // Exit exception.
      " bx lr \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[base_state_size]"i"(sizeof(ArmV7mFaultRegisters)),
        [extra_state_size]"i"(sizeof(ArmV7mExtraRegisters))
      // clang-format on
  );
}

}  // extern "C"
}  // namespace pw::cpu_exception