// Copyright 2019 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include <cstdint>
#include <cstring>
#include <span>
#include <type_traits>

#include "gtest/gtest.h"
#include "pw_cpu_exception/entry.h"
#include "pw_cpu_exception/handler.h"
#include "pw_cpu_exception/support.h"
#include "pw_cpu_exception_cortex_m/cpu_state.h"

namespace pw::cpu_exception {
namespace {

// CMSIS/Cortex-M/ARMv7 related constants.
// These values are from the ARMv7-M Architecture Reference Manual DDI 0403E.b.
// https://static.docs.arm.com/ddi0403/e/DDI0403E_B_armv7m_arm.pdf

// Exception ISR numbers. (ARMv7-M Section B1.5.2)
constexpr uint32_t kHardFaultIsrNum = 0x3u;
constexpr uint32_t kMemFaultIsrNum = 0x4u;
constexpr uint32_t kBusFaultIsrNum = 0x5u;
constexpr uint32_t kUsageFaultIsrNum = 0x6u;

// Masks for individual bits of HFSR. (ARMv7-M Section B3.2.16)
constexpr uint32_t kForcedHardfaultMask = 0x1u << 30;

// Masks for individual bits of CFSR. (ARMv7-M Section B3.2.15)
constexpr uint32_t kUsageFaultStart = 0x1u << 16;
constexpr uint32_t kUnalignedFaultMask = kUsageFaultStart << 8;
constexpr uint32_t kDivByZeroFaultMask = kUsageFaultStart << 9;
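
// Note: CFSR aggregates MMFSR (bits 0-7), BFSR (bits 8-15), and UFSR (bits
// 16-31); UNALIGNED and DIVBYZERO are UFSR bits 8 and 9, which is why the
// masks above shift from kUsageFaultStart.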

// CCR flags. (ARMv7-M Section B3.2.8)
constexpr uint32_t kUnalignedTrapEnableMask = 0x1u << 3;
constexpr uint32_t kDivByZeroTrapEnableMask = 0x1u << 4;

// Masks for individual bits of SHCSR. (ARMv7-M Section B3.2.13)
constexpr uint32_t kMemFaultEnableMask = 0x1 << 16;
constexpr uint32_t kBusFaultEnableMask = 0x1 << 17;
constexpr uint32_t kUsageFaultEnableMask = 0x1 << 18;

// Bit masks for an exception return value. (ARMv7-M Section B1.5.8)
constexpr uint32_t kExcReturnBasicFrameMask = (0x1u << 4);
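
// Note: when bit 4 of EXC_RETURN is clear, the hardware pushed an extended
// frame that includes FPU state; the FPU tests below assert this bit is clear
// after a fault taken while the FPU was active.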

// CPACR mask that enables the FPU by granting full access to coprocessors
// CP10 and CP11. (ARMv7-M Section B3.2.20)
constexpr uint32_t kFpuEnableMask = (0xFu << 20);

// Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
volatile uint32_t& cortex_m_vtor =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED08u);
volatile uint32_t& cortex_m_ccr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED14u);
volatile uint32_t& cortex_m_shcsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);
volatile uint32_t& cortex_m_cfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
volatile uint32_t& cortex_m_hfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED2Cu);
volatile uint32_t& cortex_m_cpacr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED88u);
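
// Note on the pattern above: each fixed System Control Block address is cast
// to a volatile uint32_t reference so reads and writes go straight to the
// hardware register; volatile keeps the compiler from caching or reordering
// the accesses.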

// Begin a critical section that must not be interrupted.
// This function disables interrupts to prevent any sort of context switch
// until the critical section ends. This is done by setting PRIMASK to 1 using
// the cps instruction.
//
// Returns the state of PRIMASK before interrupts were disabled.
inline uint32_t BeginCriticalSection() {
  uint32_t previous_state;
  asm volatile(
      " mrs %[previous_state], primask \n"
      " cpsid i \n"
      // clang-format off
      : /*output=*/[previous_state]"=r"(previous_state)
      : /*input=*/
      : /*clobbers=*/"memory"
      // clang-format on
  );
  return previous_state;
}

// Ends a critical section.
// Restores the previous state produced by BeginCriticalSection().
// Note: This does not always re-enable interrupts.
inline void EndCriticalSection(uint32_t previous_state) {
  asm volatile(
      // clang-format off
      "msr primask, %0"
      : /*output=*/
      : /*input=*/"r"(previous_state)
      : /*clobbers=*/"memory"
      // clang-format on
  );
}
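
// Minimal usage sketch for the pair above (illustrative only; shared_state is
// a stand-in, not part of these tests):
//
//   uint32_t saved = BeginCriticalSection();
//   shared_state++;              // Interrupts are masked here.
//   EndCriticalSection(saved);   // Restores PRIMASK; may remain masked.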

void EnableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  cortex_m_cpacr |= kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}

void DisableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  cortex_m_cpacr &= ~kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}

// Counter that is incremented each time the test's exception handler
// correctly handles a triggered exception.
size_t exceptions_handled = 0;

// Global variable that triggers a single nested fault on a fault.
bool trigger_nested_fault = false;

// Allow up to kMaxFaultDepth faults before determining the device is
// unrecoverable.
constexpr size_t kMaxFaultDepth = 2;

// Variable to prevent more than kMaxFaultDepth nested crashes.
size_t current_fault_depth = 0;

// The faulting pw_cpu_exception_State is copied here so values can be
// validated after exiting the exception handler.
pw_cpu_exception_State captured_states[kMaxFaultDepth] = {};
pw_cpu_exception_State& captured_state = captured_states[0];

// Flag used to check if the contents of std::span match the captured state.
bool span_matches = false;

// Variable to be manipulated by a function that uses floating point to test
// that exceptions push FPU state correctly.
// Note: Don't use double because on a Cortex-M4F with fpv4-sp-d16, gcc will
// generate code that uses the software floating point support for double.
volatile float float_test_value;

// Magic pattern to help identify if the exception handler's
// pw_cpu_exception_State pointer was pointing to captured CPU state that was
// pushed onto the stack when the faulting context uses the VFP. The faulting
// context computes the same product through volatile operands
// (_PW_TEST_FPU_OPERATION below) so that real FPU instructions execute.
const float kFloatTestPattern = 12.345f * 67.89f;

volatile float fpu_lhs_val = 12.345f;
volatile float fpu_rhs_val = 67.89f;

// This macro provides a calculation that equals kFloatTestPattern. Because
// the operands are volatile, the compiler must emit a real FPU multiply at
// runtime rather than folding the result into a constant.
#define _PW_TEST_FPU_OPERATION (fpu_lhs_val * fpu_rhs_val)

// Magic pattern to help identify if the exception handler's
// pw_cpu_exception_State pointer was pointing to captured CPU state that was
// pushed onto the stack.
constexpr uint32_t kMagicPattern = 0xDEADBEEF;

// This pattern serves a purpose similar to kMagicPattern, but is used for
// testing a nested fault to ensure both pw_cpu_exception_State objects are
// correctly captured.
constexpr uint32_t kNestedMagicPattern = 0x900DF00D;

// The manually captured PC won't be exactly the same as the faulting PC. This
// is the maximum tolerated distance between the two to allow the test to
// pass.
constexpr int32_t kMaxPcDistance = 4;

// In-memory interrupt service routine vector table.
using InterruptVectorTable = std::aligned_storage_t<512, 512>;
InterruptVectorTable ram_vector_table;
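
// Note: ARMv7-M requires the vector table to be naturally aligned to a power
// of two at least as large as the table, with a minimum of 128 bytes (ARMv7-M
// Section B3.2.5); the 512-byte alignment above covers the 128 word-sized
// entries copied into ram_vector_table below.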

// Forward declaration of the exception handler.
void TestingExceptionHandler(pw_cpu_exception_State*);

// Populate the device's registers with testable values, then trigger an
// exception.
void BeginBaseFaultTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      " mov r0, %[magic] \n"
      " mov r1, #0 \n"
      " mov r2, pc \n"
      " mov r3, lr \n"
      // This instruction divides by zero.
      " udiv r1, r1, r1 \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);
}
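
// Note: r0-r3 seeded above are part of the basic frame the hardware pushes on
// exception entry, so the handler's captured state can be checked against the
// values written immediately before the fault.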

// Populate the device's registers with testable values, then trigger an
// exception.
void BeginNestedFaultTest() {
  // Make sure unaligned accesses cause a fault.
  cortex_m_ccr |= kUnalignedTrapEnableMask;
  volatile uint32_t magic = kNestedMagicPattern;
  asm volatile(
      " mov r0, %[magic] \n"
      " mov r1, #0 \n"
      " mov r2, pc \n"
      " mov r3, lr \n"
      // This instruction does an unaligned read.
      " ldrh r1, [%[magic_addr], 1] \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic), [magic_addr]"r"(&magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );
}
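
// Note: the unaligned halfword load above is normally permitted on ARMv7-M;
// it only faults because CCR.UNALIGN_TRP is set at the start of
// BeginNestedFaultTest().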

// Populate the device's registers with testable values, then trigger an
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for psp.
void BeginBaseFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0} \n"
      " mov r0, %[magic] \n"
      " mov r1, #0 \n"
      " mov r2, pc \n"
      " mov r3, lr \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r1, r1, r1 \n"
      " pop {r0} \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);
}

// Populate some of the extended set of captured registers, then trigger an
// exception.
void BeginExtendedFaultTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      " mov r4, %[magic] \n"
      " mov r5, #0 \n"
      " mov r11, %[magic] \n"
      " mrs %[local_msp], msp \n"
      " mrs %[local_psp], psp \n"
      // This instruction divides by zero.
      " udiv r5, r5, r5 \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);

  // Check that the captured stack pointers matched the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}
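
// Note: r4, r5, and r11 are not part of the hardware-pushed exception frame;
// they appear in captured_state only because the exception entry code
// captures the extended register set in software, which is what the
// "extended" tests exercise.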

// Populate some of the extended set of captured registers, then trigger an
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for psp.
void BeginExtendedFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0} \n"
      " mov r4, %[magic] \n"
      " mov r5, #0 \n"
      " mov r11, %[magic] \n"
      " mrs %[local_msp], msp \n"
      " mrs %[local_psp], psp \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r5, r5, r5 \n"
      " pop {r0} \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);

  // Check that the captured stack pointers matched the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}

void InstallVectorTableEntries() {
  uint32_t prev_state = BeginCriticalSection();
  // If the vector table is already installed, there is nothing left to do.
  if (cortex_m_vtor == reinterpret_cast<uint32_t>(&ram_vector_table)) {
    EndCriticalSection(prev_state);
    return;
  }
  // Copy the table to a new location since it's not guaranteed that we can
  // write to the original one.
  std::memcpy(&ram_vector_table,
              reinterpret_cast<uint32_t*>(cortex_m_vtor),
              sizeof(ram_vector_table));

  // Override exception handling vector table entries.
  uint32_t* exception_entry_addr =
      reinterpret_cast<uint32_t*>(pw_cpu_exception_Entry);
  uint32_t** interrupts = reinterpret_cast<uint32_t**>(&ram_vector_table);
  interrupts[kHardFaultIsrNum] = exception_entry_addr;
  interrupts[kMemFaultIsrNum] = exception_entry_addr;
  interrupts[kBusFaultIsrNum] = exception_entry_addr;
  interrupts[kUsageFaultIsrNum] = exception_entry_addr;

  // Update the Vector Table Offset Register (VTOR) to point to the new vector
  // table.
  cortex_m_vtor = reinterpret_cast<uint32_t>(&ram_vector_table);
  EndCriticalSection(prev_state);
}
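
// Note: all four fault vectors intentionally share one entry point;
// pw_cpu_exception_Entry captures the CPU state and then dispatches to the
// handler installed through pw_cpu_exception_SetHandler() (in these tests,
// TestingExceptionHandler).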

void EnableAllFaultHandlers() {
  cortex_m_shcsr |=
      kMemFaultEnableMask | kBusFaultEnableMask | kUsageFaultEnableMask;
}

void Setup(bool use_fpu) {
  if (use_fpu) {
    EnableFpu();
  } else {
    DisableFpu();
  }
  pw_cpu_exception_SetHandler(TestingExceptionHandler);
  EnableAllFaultHandlers();
  InstallVectorTableEntries();
  exceptions_handled = 0;
  current_fault_depth = 0;
  captured_state = {};
  float_test_value = 0.0f;
  trigger_nested_fault = false;
}

TEST(FaultEntry, BasicFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultTest();
  ASSERT_EQ(exceptions_handled, 1u);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}

TEST(FaultEntry, BasicUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultUnalignedStackTest();
  ASSERT_EQ(exceptions_handled, 1u);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}

TEST(FaultEntry, ExtendedFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}

TEST(FaultEntry, ExtendedUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultUnalignedStackTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}

TEST(FaultEntry, NestedFault) {
  // Due to the way nesting is handled, captured_states[0] holds the nested
  // fault, since that fault must be handled *FIRST*. Only after it is handled
  // can the original fault be handled (captured into captured_states[1]).

  Setup(/*use_fpu=*/false);
  trigger_nested_fault = true;
  BeginBaseFaultTest();
  ASSERT_EQ(exceptions_handled, 2u);

  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_states[1].base.pc - captured_states[1].base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r3),
            static_cast<uint32_t>(captured_states[1].base.lr));

  // NESTED STATE
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r0),
            kNestedMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  captured_pc_distance =
      captured_states[0].base.pc - captured_states[0].base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r3),
            static_cast<uint32_t>(captured_states[0].base.lr));
}

// TODO(pwbug/17): Replace when Pigweed config system is added.
// Disable tests that rely on hardware FPU if this module wasn't built with
// hardware FPU support.
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1

// Populate some of the extended set of captured registers, then trigger an
// exception. This function uses floating point to validate that float
// context is pushed correctly.
void BeginExtendedFaultFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultTest();
}

// Populate some of the extended set of captured registers, then trigger an
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for psp.
// This function uses floating point to validate that float context is pushed
// correctly.
void BeginExtendedFaultUnalignedStackFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultUnalignedStackTest();
}

TEST(FaultEntry, FloatFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultFloatTest();
  ASSERT_EQ(exceptions_handled, 1u);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check that FPU state was pushed during the exception.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check that float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}

TEST(FaultEntry, FloatUnalignedStackFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultUnalignedStackFloatTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check that FPU state was pushed during the exception.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check that float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}

#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1

void TestingExceptionHandler(pw_cpu_exception_State* state) {
  if (++current_fault_depth > kMaxFaultDepth) {
    volatile bool loop = true;
    while (loop) {
      // Hit an unexpected nested crash; prevent further nesting.
    }
  }

  if (trigger_nested_fault) {
    // Disable nesting before triggering the nested fault to prevent infinite
    // recursive crashes.
    trigger_nested_fault = false;
    BeginNestedFaultTest();
  }
  // Logging may require the FPU (FPU instructions in vsnprintf()), so
  // re-enable it as soon as possible.
  EnableFpu();

  // Disable traps. They must be disabled before any EXPECT, as memcpy() can
  // do unaligned operations.
  cortex_m_ccr &= ~kUnalignedTrapEnableMask;
  cortex_m_ccr &= ~kDivByZeroTrapEnableMask;

  // Clear the HFSR FORCED (nested) hard fault bit if set; like the CFSR,
  // these status bits are write-one-to-clear. The bit will only be set by the
  // nested fault test.
  EXPECT_EQ(state->extended.hfsr, cortex_m_hfsr);
  if (cortex_m_hfsr & kForcedHardfaultMask) {
    cortex_m_hfsr = kForcedHardfaultMask;
  }

  if (cortex_m_cfsr & kUnalignedFaultMask) {
    // Copy captured state to check later.
    std::memcpy(&captured_states[exceptions_handled],
                state,
                sizeof(pw_cpu_exception_State));

    // Clear the UNALIGNED fault status bit (write-one-to-clear) to "handle"
    // the exception.
    cortex_m_cfsr = kUnalignedFaultMask;
    exceptions_handled++;
    return;
  } else if (cortex_m_cfsr & kDivByZeroFaultMask) {
    // Copy captured state to check later.
    std::memcpy(&captured_states[exceptions_handled],
                state,
                sizeof(pw_cpu_exception_State));

    // Ensure the std::span view of the state compares equal to the raw state.
    std::span<const uint8_t> state_span = RawFaultingCpuState(*state);
    EXPECT_EQ(state_span.size(), sizeof(pw_cpu_exception_State));
    span_matches =
        std::memcmp(state, state_span.data(), state_span.size()) == 0;

    // Clear the DIVBYZERO fault status bit (write-one-to-clear) to "handle"
    // the exception.
    cortex_m_cfsr = kDivByZeroFaultMask;
    exceptions_handled++;
    return;
  }

  EXPECT_EQ(state->extended.shcsr, cortex_m_shcsr);

  // If an unexpected exception occurred, just enter an infinite loop.
  while (true) {
  }
}

}  // namespace
}  // namespace pw::cpu_exception