//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STACK_DEPOT_H_
#define SCUDO_STACK_DEPOT_H_

#include "atomic_helpers.h"
#include "mutex.h"

namespace scudo {

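// A small hash builder based on the 32-bit MurmurHash2 mixing steps: add()
// folds one 32-bit word into the running state and get() applies the final
// avalanche, so callers can hash a stack trace one frame at a time.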
class MurMur2HashBuilder {
  static const u32 M = 0x5bd1e995;
  static const u32 Seed = 0x9747b28c;
  static const u32 R = 24;
  u32 H;

public:
  explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
  void add(u32 K) {
    K *= M;
    K ^= K >> R;
    K *= M;
    H *= M;
    H ^= K;
  }
  u32 get() {
    u32 X = H;
    X ^= X >> 13;
    X *= M;
    X ^= X >> 15;
    return X;
  }
};

class StackDepot {
  HybridMutex RingEndMu;
  u32 RingEnd = 0;

  // This data structure stores a stack trace for each allocation and
  // deallocation when stack trace recording is enabled. A trace may be looked
  // up using a hash of the trace. The lower bits of the hash are an index
  // into the Tab array, which stores an index into the Ring array where the
  // stack traces themselves are stored. As the name implies, Ring is a ring
  // buffer, so a stack trace may wrap around to the start of the array.
  //
  // Each stack trace in Ring is prefixed by a stack trace marker consisting of
  // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
  // and stack trace markers in the case where instruction pointers are 4-byte
  // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
  // size of the stack trace in bits 33-63.
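  // For example (an illustrative value, not one taken from the code), a trace
  // of 3 frames whose 32-bit hash is 0xabcdef12 gets the marker
  // (u64(3) << 33) | (u64(0xabcdef12) << 1) | 1.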
  //
  // The insert() function is potentially racy in its accesses to the Tab and
  // Ring arrays, but find() is resilient to races in the sense that, barring
  // hash collisions, it will either return the correct stack trace or no stack
  // trace at all, even if two instances of insert() raced with one another.
  // This is achieved by re-checking the hash of the stack trace before
  // returning the trace.
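  //
  // A sketch of typical use (Depot, Frames, NumFrames and processFrame are
  // illustrative caller-side names, not part of this interface):
  //
  //   u32 Hash = Depot.insert(Frames, Frames + NumFrames);
  //   ...
  //   uptr RingPos, Size;
  //   if (Depot.find(Hash, &RingPos, &Size))
  //     for (uptr I = 0; I != Size; ++I)
  //       processFrame(Depot[RingPos + I]);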

#ifdef SCUDO_FUZZ
  // Use smaller table sizes for fuzzing in order to reduce input size.
  static const uptr TabBits = 4;
#else
  static const uptr TabBits = 16;
#endif
  static const uptr TabSize = 1 << TabBits;
  static const uptr TabMask = TabSize - 1;
  atomic_u32 Tab[TabSize] = {};

#ifdef SCUDO_FUZZ
  static const uptr RingBits = 4;
#else
  static const uptr RingBits = 19;
#endif
  static const uptr RingSize = 1 << RingBits;
  static const uptr RingMask = RingSize - 1;
  atomic_u64 Ring[RingSize] = {};

public:
  // Insert the stack trace [Begin, End) into the stack depot, and return the
  // hash of the trace.
  u32 insert(uptr *Begin, uptr *End) {
    MurMur2HashBuilder B;
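    // Hash the trace one frame at a time; the low two bits of each frame are
    // dropped before hashing.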
    for (uptr *I = Begin; I != End; ++I)
      B.add(u32(*I) >> 2);
    u32 Hash = B.get();

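    // If the table slot for this hash already points at an entry carrying the
    // same marker (same hash and size), the trace was inserted earlier and we
    // can return without taking the lock.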
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
    if (Entry == Id)
      return Hash;

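    // Otherwise claim the current end of the ring under the lock, point the
    // table slot at it, and write the marker followed by the frames.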
    ScopedLock Lock(RingEndMu);
    RingPos = RingEnd;
    atomic_store_relaxed(&Tab[Pos], RingPos);
    atomic_store_relaxed(&Ring[RingPos], Id);
    for (uptr *I = Begin; I != End; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      atomic_store_relaxed(&Ring[RingPos], *I);
    }
    RingEnd = (RingPos + 1) & RingMask;
    return Hash;
  }

  // Look up a stack trace by hash. Returns true if successful. The trace may
  // be accessed via operator[] passing indexes between *RingPosPtr and
  // *RingPosPtr + *SizePtr.
  bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    if (RingPos >= RingSize)
      return false;
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 HashWithTagBit = (u64(Hash) << 1) | 1;
    if ((Entry & 0x1ffffffff) != HashWithTagBit)
      return false;
    u32 Size = u32(Entry >> 33);
    if (Size >= RingSize)
      return false;
    *RingPosPtr = (RingPos + 1) & RingMask;
    *SizePtr = Size;
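    // Re-hash the frames that are about to be exposed to the caller; if a
    // racing insert() has overwritten them in the meantime, the hash will no
    // longer match and the lookup fails instead of returning a bogus trace.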
    MurMur2HashBuilder B;
    for (uptr I = 0; I != Size; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
    }
    return B.get() == Hash;
  }

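  // Raw access to the ring buffer; RingPos is masked, so the indexes handed
  // out by find() can be used directly even when the trace wraps around.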
  u64 operator[](uptr RingPos) const {
    return atomic_load_relaxed(&Ring[RingPos & RingMask]);
  }
};

} // namespace scudo

#endif // SCUDO_STACK_DEPOT_H_