//===-- memtag.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEMTAG_H_
#define SCUDO_MEMTAG_H_

#include "internal_defs.h"

#if SCUDO_LINUX
#include <sys/auxv.h>
#include <sys/prctl.h>
#endif

namespace scudo {

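// Set a random tag on the granules in [Ptr, Ptr + Size) and report the
// resulting tagged bounds. Defined at the end of this header in terms of
// selectRandomTag() and storeTags().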
void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask, uptr *TaggedBegin,
                  uptr *TaggedEnd);

#if defined(__aarch64__) || defined(SCUDO_FUZZ)

// We assume that Top-Byte Ignore is enabled if the architecture supports memory
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
#if SCUDO_LINUX && !defined(SCUDO_DISABLE_TBI)
inline constexpr bool archSupportsMemoryTagging() { return true; }
#else
inline constexpr bool archSupportsMemoryTagging() { return false; }
#endif

inline constexpr uptr archMemoryTagGranuleSize() { return 16; }

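// With TBI enabled, the 4-bit MTE logical tag lives in bits 56-59 of the
// pointer and the rest of the top byte is ignored by the hardware, so
// untagging masks off the top byte and the tag is read starting at bit 56.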
inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }

inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }

#else

inline constexpr bool archSupportsMemoryTagging() { return false; }

inline uptr archMemoryTagGranuleSize() {
  UNREACHABLE("memory tagging not supported");
}

inline uptr untagPointer(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

inline uint8_t extractTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

#if defined(__aarch64__)

#if SCUDO_LINUX

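// The kernel advertises MTE support to userspace via a bit in the HWCAP2
// auxiliary vector entry; older libc headers may not define it yet, hence
// the fallback definition below.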
inline bool systemSupportsMemoryTagging() {
#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)
#endif
  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
}

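// Returns whether the kernel is configured to fault (synchronously or
// asynchronously) on tag check failures in this thread, as reported by the
// TCF field of PR_GET_TAGGED_ADDR_CTRL.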
inline bool systemDetectsMemoryTagFaultsTestOnly() {
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT 1
#endif
#ifndef PR_MTE_TCF_NONE
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_MASK
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#endif
  return (static_cast<unsigned long>(
              prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &
          PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}

#else // !SCUDO_LINUX

inline bool systemSupportsMemoryTagging() { return false; }

inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }

#endif // SCUDO_LINUX

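// The PSTATE.TCO (Tag Check Override) bit suppresses tag checking on the
// current thread while set; the test-only helpers below toggle it directly.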
inline void disableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #1
      )");
}

inline void enableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #0
      )");
}

class ScopedDisableMemoryTagChecks {
  size_t PrevTCO;

public:
  ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        mrs %0, tco
        msr tco, #1
        )"
        : "=r"(PrevTCO));
  }

  ~ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        msr tco, %0
        )"
        :
        : "r"(PrevTCO));
  }
};
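
// A minimal usage sketch (the function name is hypothetical): tag checks are
// suppressed for the lifetime of the scope and restored by the destructor,
// including on early return.
//
//   void inspectRecycledChunk(const void *Ptr, uptr Size) {
//     ScopedDisableMemoryTagChecks Disable;
//     // Safe to read [Ptr, Ptr + Size) even if its tags no longer match Ptr.
//   }
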
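// Insert a random logical tag into Ptr via the IRG instruction; tags whose
// corresponding bit is set in ExcludeMask are never selected.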
inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  uptr TaggedPtr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
      )"
      : [TaggedPtr] "=r"(TaggedPtr)
      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
  return TaggedPtr;
}

inline uptr addFixedTag(uptr Ptr, uptr Tag) { return Ptr | (Tag << 56); }

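// Set the allocation tag of every 16-byte granule in [Begin, End) to the
// logical tag of Begin, zeroing the memory as a side effect (STZG), and
// return the end of the tagged region.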
inline uptr storeTags(uptr Begin, uptr End) {
  DCHECK(Begin % 16 == 0);
  if (Begin != End) {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag

      1:
        stzg %[Cur], [%[Cur]], #16
        cmp %[Cur], %[End]
        b.lt 1b
        )"
        : [Cur] "+&r"(Begin)
        : [End] "r"(End)
        : "memory");
  }
  return Begin;
}

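// Resulting block layout, as a sketch (granules are 16 bytes):
//
//   | header granule (tag 0) | user granules (random tag) | granule (tag 0) |
//                             ^ Ptr / TaggedBegin           ^ untagPointer(TaggedEnd)
//
// The trailing granule is only retagged when it still lies within the block.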
inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                                uptr BlockEnd) {
  // Prepare the granule before the chunk to store the chunk header by setting
  // its tag to 0. Normally its tag will already be 0, but in the case where a
  // chunk holding a low alignment allocation is reused for a higher alignment
  // allocation, the chunk may already have a non-zero tag from the previous
  // allocation.
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      stg %0, [%0, #-16]
      )"
      :
      : "r"(Ptr)
      : "memory");

  uptr TaggedBegin, TaggedEnd;
  setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);

  // Finally, set the tag of the granule past the end of the allocation to 0,
  // to catch linear overflows even if a previous larger allocation used the
  // same block and tag. Only do this if the granule past the end is in our
  // block, because this would otherwise lead to a SEGV if the allocation
  // covers the entire block and our block is at the end of a mapping. The tag
  // of the next block's header granule will be set to 0, so it will serve the
  // purpose of catching linear overflows in this case.
  uptr UntaggedEnd = untagPointer(TaggedEnd);
  if (UntaggedEnd != BlockEnd)
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        stg %0, [%0]
        )"
        :
        : "r"(UntaggedEnd)
        : "memory");
  return reinterpret_cast<void *>(TaggedBegin);
}

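// Fix up tags after an in-place resize: shrinking only requires zero-tagging
// the granule past the new end, while growing retags the added granules with
// OldPtr's tag before zero-tagging the granule past the new end.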
inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
  uptr RoundOldPtr = roundUpTo(OldPtr, 16);
  if (RoundOldPtr >= NewPtr) {
    // If the allocation is shrinking we just need to set the tag past the end
    // of the allocation to 0. See explanation in prepareTaggedChunk above.
    uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
    if (RoundNewPtr != BlockEnd)
      __asm__ __volatile__(
          R"(
          .arch_extension memtag
          stg %0, [%0]
          )"
          :
          : "r"(RoundNewPtr)
          : "memory");
    return;
  }

  __asm__ __volatile__(R"(
    .arch_extension memtag

    // Set the memory tag of the region
    // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))
    // to the pointer tag stored in OldPtr.
  1:
    stzg %[Cur], [%[Cur]], #16
    cmp %[Cur], %[End]
    b.lt 1b

    // Finally, set the tag of the granule past the end of the allocation to 0.
    and %[Cur], %[Cur], #(1 << 56) - 1
    cmp %[Cur], %[BlockEnd]
    b.eq 2f
    stg %[Cur], [%[Cur]]

  2:
  )"
                       : [Cur] "+&r"(RoundOldPtr), [End] "+&r"(NewPtr)
                       : [BlockEnd] "r"(BlockEnd)
                       : "memory");
}

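// Read the allocation tag of the granule containing Ptr (LDG) and return Ptr
// with that tag inserted into its tag bits.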
inline uptr loadTag(uptr Ptr) {
  uptr TaggedPtr = Ptr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      ldg %0, [%0]
      )"
      : "+r"(TaggedPtr)
      :
      : "memory");
  return TaggedPtr;
}

#else

inline bool systemSupportsMemoryTagging() {
  UNREACHABLE("memory tagging not supported");
}

inline bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline void disableMemoryTagChecksTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline void enableMemoryTagChecksTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

struct ScopedDisableMemoryTagChecks {
  ScopedDisableMemoryTagChecks() {}
};

inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  (void)Ptr;
  (void)ExcludeMask;
  UNREACHABLE("memory tagging not supported");
}

inline uptr addFixedTag(uptr Ptr, uptr Tag) {
  (void)Ptr;
  (void)Tag;
  UNREACHABLE("memory tagging not supported");
}

inline uptr storeTags(uptr Begin, uptr End) {
  (void)Begin;
  (void)End;
  UNREACHABLE("memory tagging not supported");
}

inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                                uptr BlockEnd) {
  (void)Ptr;
  (void)Size;
  (void)ExcludeMask;
  (void)BlockEnd;
  UNREACHABLE("memory tagging not supported");
}

inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
  (void)OldPtr;
  (void)NewPtr;
  (void)BlockEnd;
  UNREACHABLE("memory tagging not supported");
}

inline uptr loadTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
                         uptr *TaggedBegin, uptr *TaggedEnd) {
  *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
  *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
}

inline void *untagPointer(void *Ptr) {
  return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
}

inline void *loadTag(void *Ptr) {
  return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
}

inline void *addFixedTag(void *Ptr, uptr Tag) {
  return reinterpret_cast<void *>(
      addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
}

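// A config may opt out of memory tagging even on hardware that supports it;
// the allocator uses MTE only when both the architecture and the config
// allow it.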
template <typename Config>
inline constexpr bool allocatorSupportsMemoryTagging() {
  return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging;
}

} // namespace scudo

#endif