//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Header for scudo_allocator.cpp.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_H_
#define SCUDO_ALLOCATOR_H_

#include "sanitizer_common/sanitizer_allocator.h"

#if !SANITIZER_LINUX
# error "The Scudo hardened allocator is currently only supported on Linux."
#endif

namespace __scudo {

enum AllocType : u8 {
  FromMalloc    = 0,  // Memory block came from malloc, realloc, calloc, etc.
  FromNew       = 1,  // Memory block came from operator new.
  FromNewArray  = 2,  // Memory block came from operator new [].
  FromMemalign  = 3,  // Memory block came from memalign, posix_memalign, etc.
};

enum ChunkState : u8 {
  ChunkAvailable  = 0,
  ChunkAllocated  = 1,
  ChunkQuarantine = 2
};

// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, which are fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was costly as
// well. The header will be atomically loaded and stored.
typedef u64 PackedHeader;
struct UnpackedHeader {
  u64 Checksum          : 16;
  u64 SizeOrUnusedBytes : 19;  // Size for Primary backed allocations, amount
                               // of unused bytes in the chunk for Secondary
                               // ones.
  u64 FromPrimary       : 1;
  u64 State             : 2;   // available, allocated, or quarantined
  u64 AllocType         : 2;   // malloc, new, new[], or memalign
  u64 Offset            : 16;  // Offset from the beginning of the backend
                               // allocation to the beginning of the chunk
                               // itself, in multiples of MinAlignment. See
                               // comment about its maximum value and in
                               // init().
  u64 Salt              : 8;
};

typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
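
// A minimal sketch (not part of the original interface): the bit-field widths
// above sum to 16 + 19 + 1 + 2 + 2 + 16 + 8 = 64 bits, so a whole header fits
// in a single atomic 64-bit slot. A hypothetical load helper, assuming the
// atomic_load and bit_cast helpers from sanitizer_common:
INLINE UnpackedHeader exampleLoadHeader(const AtomicPackedHeader *AtomicHeader) {
  // Assumption: a relaxed load is sufficient here, with the Checksum field
  // verified separately by the caller.
  PackedHeader Packed = atomic_load(AtomicHeader, memory_order_relaxed);
  return bit_cast<UnpackedHeader>(Packed);
}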

// Minimum alignment of 8 bytes for 32-bit, 16 bytes for 64-bit.
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24;  // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;

const uptr ChunkHeaderSize = sizeof(PackedHeader);
const uptr AlignedChunkHeaderSize =
    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
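// Worked example (64-bit): ChunkHeaderSize == 8 and MinAlignment == 16, so
// AlignedChunkHeaderSize == (8 + 15) & ~15 == 16.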

#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
# if defined(__aarch64__) && SANITIZER_ANDROID
const uptr AllocatorSize = 0x4000000000ULL;   // 256G.
# elif defined(__aarch64__)
const uptr AllocatorSize = 0x10000000000ULL;  // 1T.
# else
const uptr AllocatorSize = 0x40000000000ULL;  // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less hardened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
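// Worked example (assuming a 47-bit mmap range, as on typical x86-64 Linux):
// NumRegions == 2^47 >> 20 == 2^27, and the two-level map above covers them
// with 2^15 first-level entries pointing to 2^12-entry second-level tables.
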
typedef DefaultSizeClassMap SizeClassMap;
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}
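// The mask trick above requires Boundary to be a power of two, which holds
// for all the alignments used in this allocator. Worked example:
// RoundUpTo(24, 16) == (24 + 15) & ~15 == 32.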

#include "scudo_allocator_secondary.h"
#include "scudo_allocator_combined.h"

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
                               SecondaryAllocator> ScudoBackendAllocator;
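
// For illustration only: the combined backend routes small requests to the
// Primary (size-class based) and large ones to the Secondary (mmap based).
// A usage sketch, with hypothetical method names and parameters:
//
//   ScudoBackendAllocator Backend;
//   AllocatorCache Cache;
//   Backend.init(/*ReleaseToOSIntervalMs=*/-1);
//   void *Block = Backend.allocate(&Cache, Size, Alignment, FromPrimary);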

void initScudo();

void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);
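
// For illustration, the public C allocation functions are expected to forward
// to these entry points, tagging each allocation with its origin so that
// mismatched deallocations (e.g. free() on a new-ed pointer) can be detected.
// A sketch (the actual wiring uses the sanitizer interception machinery):
//
//   void *malloc(uptr Size) { return scudoMalloc(Size, FromMalloc); }
//   void free(void *Ptr) { scudoFree(Ptr, FromMalloc); }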

}  // namespace __scudo

#endif  // SCUDO_ALLOCATOR_H_