blob: 2c524d5892e01b36bfd6ed293ef95d0e1e60b048 [file] [log] [blame]
Armando Montanez5104cd62019-12-10 14:36:43 -08001// Copyright 2019 The Pigweed Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License"); you may not
4// use this file except in compliance with the License. You may obtain a copy of
5// the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations under
13// the License.
14
#include <cstdint>
#include <cstring>
#include <type_traits>

#include "gtest/gtest.h"
#include "pw_cpu_exception/cpu_exception.h"
#include "pw_cpu_exception_armv7m/cpu_state.h"
#include "pw_span/span.h"
22
23namespace pw::cpu_exception {
24namespace {
25
// CMSIS/Cortex-M/ARMv7 related constants.
// These values are from the ARMv7-M Architecture Reference Manual DDI 0403E.b.
// https://static.docs.arm.com/ddi0403/e/DDI0403E_B_armv7m_arm.pdf

// Exception ISR number. (ARMv7-M Section B1.5.2)
// These index the vector table entries overridden in
// InstallVectorTableEntries(), and are compared against ICSR's VECTACTIVE
// field in the tests.
constexpr uint32_t kHardFaultIsrNum = 0x3u;
constexpr uint32_t kMemFaultIsrNum = 0x4u;
constexpr uint32_t kBusFaultIsrNum = 0x5u;
constexpr uint32_t kUsageFaultIsrNum = 0x6u;

// Masks for individual bits of HFSR. (ARMv7-M Section B3.2.16)
constexpr uint32_t kForcedHardfaultMask = 0x1u << 30;

// Masks for individual bits of CFSR. (ARMv7-M Section B3.2.15)
// NOTE(review): "Useage" is a long-standing typo for "Usage"; the name is kept
// as-is so this block stays consistent with its users elsewhere in the file.
constexpr uint32_t kUseageFaultStart = 0x1u << 16;
// DIVBYZERO sits 9 bits into the usage-fault portion of CFSR (bit 25 overall).
constexpr uint32_t kDivByZeroFaultMask = kUseageFaultStart << 9;

// CCR flags. (ARMv7-M Section B3.2.8)
// DIV_0_TRP: when set, an integer divide by zero raises a usage fault. The
// tests set this bit to trigger faults on purpose.
constexpr uint32_t kDivByZeroTrapEnableMask = 0x1u << 4;

// Masks for individual bits of SHCSR. (ARMv7-M Section B3.2.13)
constexpr uint32_t kMemFaultEnableMask = 0x1 << 16;
constexpr uint32_t kBusFaultEnableMask = 0x1 << 17;
constexpr uint32_t kUseageFaultEnableMask = 0x1 << 18;

// Bit masks for an exception return value. (ARMv7-M Section B1.5.8)
// Set in EXC_RETURN when the exception stacked a basic (FPU-less) frame;
// clear when an extended frame including FPU state was pushed.
constexpr uint32_t kExcReturnBasicFrameMask = (0x1u << 4);

// CPCAR mask that enables FPU. (ARMv7-M Section B3.2.20)
// Grants full access to coprocessors CP10/CP11 (the floating point unit).
constexpr uint32_t kFpuEnableMask = (0xFu << 20);

// Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
// Exposed as volatile references so reads/writes are never elided or
// reordered by the compiler.
volatile uint32_t& arm_v7m_vtor =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED08u);
volatile uint32_t& arm_v7m_ccr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED14u);
volatile uint32_t& arm_v7m_shcsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);
volatile uint32_t& arm_v7m_cfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
volatile uint32_t& arm_v7m_cpacr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED88u);
68
69// Begin a critical section that must not be interrupted.
70// This function disables interrupts to prevent any sort of context switch until
71// the critical section ends. This is done by setting PRIMASK to 1 using the cps
72// instruction.
73//
74// Returns the state of PRIMASK before it was disabled.
// Begin a critical section that must not be interrupted.
// This function disables interrupts to prevent any sort of context switch until
// the critical section ends. This is done by setting PRIMASK to 1 using the cps
// instruction.
//
// Returns the state of PRIMASK before it was disabled, so EndCriticalSection()
// can restore it (nesting-safe: if interrupts were already masked, they stay
// masked after the matching EndCriticalSection()).
inline uint32_t BeginCriticalSection() {
  uint32_t previous_state;
  asm volatile(
      // Read PRIMASK first, then mask interrupts; this order guarantees the
      // saved value reflects the state before this critical section.
      " mrs %[previous_state], primask \n"
      " cpsid i \n"
      // clang-format off
      : /*output=*/[previous_state]"=r"(previous_state)
      : /*input=*/
      : /*clobbers=*/"memory"
      // clang-format on
  );
  return previous_state;
}
88
89// Ends a critical section.
90// Restore previous previous state produced by BeginCriticalSection().
91// Note: This does not always re-enable interrupts.
// Ends a critical section.
// Restore previous previous state produced by BeginCriticalSection().
// Note: This does not always re-enable interrupts: if PRIMASK was already set
// when the critical section began, the restored value keeps interrupts masked.
inline void EndCriticalSection(uint32_t previous_state) {
  asm volatile(
      // clang-format off
      "msr primask, %0"
      : /*output=*/
      : /*input=*/"r"(previous_state)
      : /*clobbers=*/"memory"
      // clang-format on
  );
}
102
// Grants full access to the FPU coprocessors via CPACR.
// Compiles to a no-op when this module is built without hardware FPU support.
void EnableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  const uint32_t cpacr_value = arm_v7m_cpacr;
  arm_v7m_cpacr = cpacr_value | kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}
109
// Revokes FPU coprocessor access via CPACR.
// Compiles to a no-op when this module is built without hardware FPU support.
void DisableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  const uint32_t cpacr_value = arm_v7m_cpacr;
  arm_v7m_cpacr = cpacr_value & ~kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}
116
// Simple boolean that is set to true if the test's exception handler correctly
// handles a triggered exception.
bool exception_handled = false;

// Flag used to check if the contents of span matches the captured state.
bool span_matches = false;

// Faulting CpuState is copied here so values can be validated after exiting
// exception handler.
CpuState captured_state = {};

// Variable to be manipulated by function that uses floating
// point to test that exceptions push Fpu state correctly.
// Note: don't use double because a cortex-m4f with fpv4-sp-d16
// will result in gcc generating code to use the software floating
// point support for double.
volatile float float_test_value;

// Magic pattern to help identify if the exception handler's CpuState pointer
// was pointing to captured CPU state that was pushed onto the stack when
// the faulting context uses the VFP. Has to be computed at runtime
// because it uses values only available at link time.
const float kFloatTestPattern = 12.345f * 67.89f;

// The operands are volatile so the multiplication below cannot be
// constant-folded and real FPU instructions are emitted.
volatile float fpu_lhs_val = 12.345f;
volatile float fpu_rhs_val = 67.89f;

// This macro provides a calculation that equals kFloatTestPattern.
#define _PW_TEST_FPU_OPERATION (fpu_lhs_val * fpu_rhs_val)

// Magic pattern to help identify if the exception handler's CpuState pointer
// was pointing to captured CPU state that was pushed onto the stack.
constexpr uint32_t kMagicPattern = 0xDEADBEEF;

// The manually captured PC won't be the exact same as the faulting PC. This is
// the maximum tolerated distance between the two to allow the test to pass.
constexpr int32_t kMaxPcDistance = 4;

// In-memory interrupt service routine vector table.
// 512 bytes of storage with 512-byte alignment (std::aligned_storage_t takes
// <size, alignment>), enough for the fault vectors replaced below.
using InterruptVectorTable = std::aligned_storage_t<512, 512>;
InterruptVectorTable ram_vector_table;
158
159// Populate the device's registers with testable values, then trigger exception.
// Populate the device's registers with testable values, then trigger exception.
// The handler copies the faulting state into captured_state; the caller (and
// the tail of this function) validates it.
void BeginBaseFaultTest() {
  // Make sure divide by zero causes a fault.
  arm_v7m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      // Seed the caller-saved registers that the hardware stacks on exception
      // entry: r0 = magic, r1 = 0, r2 ~= current pc, r3 = lr. The tests later
      // compare these against the captured base frame.
      " mov r0, %[magic] \n"
      " mov r1, #0 \n"
      " mov r2, pc \n"
      " mov r3, lr \n"
      // This instruction divides by zero.
      " udiv r1, r1, r1 \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);
}
181
182// Populate the device's registers with testable values, then trigger exception.
183// This version causes stack to not be 4-byte aligned initially, testing
184// the fault handlers correction for psp.
// Populate the device's registers with testable values, then trigger exception.
// This version causes stack to not be 4-byte aligned initially, testing
// the fault handlers correction for psp.
void BeginBaseFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  arm_v7m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0} \n"
      " mov r0, %[magic] \n"
      " mov r1, #0 \n"
      " mov r2, pc \n"
      " mov r3, lr \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r1, r1, r1 \n"
      " pop {r0} \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was set, indicating the hardware inserted
  // padding to realign the stacked exception frame.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);
}
212
213// Populate some of the extended set of captured registers, then trigger
214// exception.
// Populate some of the extended set of captured registers, then trigger
// exception. Unlike the base test, this seeds callee-saved registers
// (r4/r5/r11) and snapshots msp/psp, which only the software-captured
// extended state records.
void BeginExtendedFaultTest() {
  // Make sure divide by zero causes a fault.
  arm_v7m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      " mov r4, %[magic] \n"
      " mov r5, #0 \n"
      " mov r11, %[magic] \n"
      // Snapshot both stack pointers immediately before faulting so they can
      // be compared with the handler-captured values.
      " mrs %[local_msp], msp \n"
      " mrs %[local_psp], psp \n"
      // This instruction divides by zero.
      " udiv r5, r5, r5 \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);

  // Check that the captured stack pointers matched the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}
244
245// Populate some of the extended set of captured registers, then trigger
246// exception.
247// This version causes stack to not be 4-byte aligned initially, testing
248// the fault handlers correction for psp.
// Populate some of the extended set of captured registers, then trigger
// exception.
// This version causes stack to not be 4-byte aligned initially, testing
// the fault handlers correction for psp.
void BeginExtendedFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  arm_v7m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0} \n"
      " mov r4, %[magic] \n"
      " mov r5, #0 \n"
      " mov r11, %[magic] \n"
      // Snapshot both stack pointers immediately before faulting so they can
      // be compared with the handler-captured values.
      " mrs %[local_msp], msp \n"
      " mrs %[local_psp], psp \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r5, r5, r5 \n"
      " pop {r0} \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was set, indicating the hardware inserted
  // padding to realign the stacked exception frame.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);

  // Check that the captured stack pointers matched the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}
284
// Installs a RAM copy of the vector table whose hard/mem/bus/usage fault
// entries all route to pw_CpuExceptionEntry, then points VTOR at it.
// Idempotent: returns immediately if VTOR already points at the RAM table.
void InstallVectorTableEntries() {
  // Interrupts must stay masked while the active vector table is replaced.
  uint32_t prev_state = BeginCriticalSection();
  // If vector table is installed already, this is done.
  if (arm_v7m_vtor == reinterpret_cast<uint32_t>(&ram_vector_table)) {
    EndCriticalSection(prev_state);
    return;
  }
  // Copy table to new location since it's not guaranteed that we can write to
  // the original one.
  std::memcpy(&ram_vector_table,
              reinterpret_cast<uint32_t*>(arm_v7m_vtor),
              sizeof(ram_vector_table));

  // Override exception handling vector table entries.
  uint32_t* exception_entry_addr =
      reinterpret_cast<uint32_t*>(pw::cpu_exception::pw_CpuExceptionEntry);
  uint32_t** interrupts = reinterpret_cast<uint32_t**>(&ram_vector_table);
  interrupts[kHardFaultIsrNum] = exception_entry_addr;
  interrupts[kMemFaultIsrNum] = exception_entry_addr;
  interrupts[kBusFaultIsrNum] = exception_entry_addr;
  interrupts[kUsageFaultIsrNum] = exception_entry_addr;

  // NOTE(review): presumably kept so the previous table address is visible in
  // a debugger; it is otherwise unused — confirm before removing.
  uint32_t old_vector_table = arm_v7m_vtor;
  // Dismiss unused variable warning for non-debug builds.
  PW_UNUSED(old_vector_table);

  // Update Vector Table Offset Register (VTOR) to point to new vector table.
  arm_v7m_vtor = reinterpret_cast<uint32_t>(&ram_vector_table);
  EndCriticalSection(prev_state);
}
315
316void EnableAllFaultHandlers() {
317 arm_v7m_shcsr |=
318 kMemFaultEnableMask | kBusFaultEnableMask | kUseageFaultEnableMask;
319}
320
321void Setup(bool use_fpu) {
322 if (use_fpu) {
323 EnableFpu();
324 } else {
325 DisableFpu();
326 }
327 EnableAllFaultHandlers();
328 InstallVectorTableEntries();
329 exception_handled = false;
330 captured_state = {};
331 float_test_value = 0.0f;
332}
333
// Triggers a divide-by-zero fault with known values in r0-r3 and validates
// the basic (hardware-stacked) register frame captured by the handler.
TEST(FaultEntry, BasicFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultTest();
  ASSERT_TRUE(exception_handled);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  // lr was copied into r3 before the fault, so the two must match.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}
349
// Same as BasicFault, but faults with a stack that is not 8-byte aligned to
// exercise the handler's stack-alignment correction path.
TEST(FaultEntry, BasicUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultUnalignedStackTest();
  ASSERT_TRUE(exception_handled);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  // lr was copied into r3 before the fault, so the two must match.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}
365
// Validates the software-captured extended registers (r4/r5/r11, fault status
// registers) after a divide-by-zero usage fault.
TEST(FaultEntry, ExtendedFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultTest();
  ASSERT_TRUE(exception_handled);
  ASSERT_TRUE(span_matches);
  const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash: CFSR shows only DIVBYZERO, and the
  // active exception number in ICSR is the usage fault vector.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}
382
// Same as ExtendedFault, but faults with an unaligned stack to exercise the
// handler's stack-alignment correction path.
TEST(FaultEntry, ExtendedUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultUnalignedStackTest();
  ASSERT_TRUE(exception_handled);
  ASSERT_TRUE(span_matches);
  const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash: CFSR shows only DIVBYZERO, and the
  // active exception number in ICSR is the usage fault vector.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}
399
400// TODO(pwbug/17): Replace when Pigweed config system is added.
401// Disable tests that rely on hardware FPU if this module wasn't built with
402// hardware FPU support.
403#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
404
405// Populate some of the extended set of captured registers, then trigger
406// exception. This function uses floating point to validate float context
407// is pushed correctly.
// Populate some of the extended set of captured registers, then trigger
// exception. This function uses floating point to validate float context
// is pushed correctly: the FPU multiply leaves live VFP state that the
// exception entry must stack and restore around the fault.
void BeginExtendedFaultFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultTest();
}
412
413// Populate some of the extended set of captured registers, then trigger
414// exception.
415// This version causes stack to not be 4-byte aligned initially, testing
416// the fault handlers correction for psp.
417// This function uses floating point to validate float context
418// is pushed correctly.
// Populate some of the extended set of captured registers, then trigger
// exception.
// This version causes stack to not be 4-byte aligned initially, testing
// the fault handlers correction for psp.
// This function uses floating point to validate float context
// is pushed correctly.
void BeginExtendedFaultUnalignedStackFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultUnalignedStackTest();
}
423
424TEST(FaultEntry, FloatFault) {
425 Setup(/*use_fpu=*/true);
426 BeginExtendedFaultFloatTest();
427 ASSERT_TRUE(exception_handled);
428 const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
429 // captured_state values must be cast since they're in a packed struct.
430 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
431 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
432 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);
433
434 // Check expected values for this crash.
435 EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
436 static_cast<uint32_t>(kDivByZeroFaultMask));
437 EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
438
439 // Check fpu state was pushed during exception
440 EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);
441
442 // Check float_test_value is correct
443 EXPECT_EQ(float_test_value, kFloatTestPattern);
444}
445
// Same as FloatFault, but faults with an unaligned stack to exercise the
// handler's stack-alignment correction together with FPU frame stacking.
TEST(FaultEntry, FloatUnalignedStackFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultUnalignedStackFloatTest();
  ASSERT_TRUE(exception_handled);
  ASSERT_TRUE(span_matches);
  const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check fpu state was pushed during exception: the "basic frame" bit of
  // EXC_RETURN must be clear when an extended FPU frame was stacked.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}
468
469#endif // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
470
471} // namespace
472
473void HandleCpuException(CpuState* state) {
474 if (arm_v7m_cfsr & kDivByZeroFaultMask) {
475 // Disable divide-by-zero trapping to "handle" exception.
476 arm_v7m_ccr &= ~kDivByZeroTrapEnableMask;
477 // Copy captured state to check later.
478 std::memcpy(&captured_state, state, sizeof(CpuState));
479 exception_handled = true;
480
481 // Ensure span compares to be the same.
482 span<const uint8_t> state_span = RawFaultingCpuState(*state);
483 EXPECT_EQ(state_span.size(), sizeof(CpuState));
484 if (std::memcmp(state, state_span.data(), state_span.size()) == 0) {
485 span_matches = true;
486 } else {
487 span_matches = false;
488 }
489
490 return;
491 }
492
493 // If an unexpected exception occurred, just enter an infinite loop.
494 while (true) {
495 }
496}
497
498} // namespace pw::cpu_exception