blob: 523808750eec230271446303336234e1686c0d6c [file] [log] [blame]
Kostya Serebryany712fc982016-06-07 01:20:26 +00001//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// Header for scudo_allocator.cpp.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef SCUDO_ALLOCATOR_H_
15#define SCUDO_ALLOCATOR_H_
16
Kostya Serebryany712fc982016-06-07 01:20:26 +000017#include "scudo_flags.h"
18
19#include "sanitizer_common/sanitizer_allocator.h"
20
Kostya Kortchinskyb39dff42017-01-18 17:11:17 +000021#if !SANITIZER_LINUX
22# error "The Scudo hardened allocator is currently only supported on Linux."
23#endif
24
Kostya Serebryany712fc982016-06-07 01:20:26 +000025namespace __scudo {
26
// Identifies which allocation API family a chunk was created through; stored
// in the 2-bit AllocType field of the chunk header.
enum AllocType : u8 {
  FromMalloc = 0,    // Memory block came from malloc, realloc, calloc, etc.
  FromNew = 1,       // Memory block came from operator new.
  FromNewArray = 2,  // Memory block came from operator new [].
  FromMemalign = 3,  // Memory block came from memalign, posix_memalign, etc.
};
33
Kostya Kortchinsky71dcc332016-10-26 16:16:58 +000034enum ChunkState : u8 {
35 ChunkAvailable = 0,
36 ChunkAllocated = 1,
37 ChunkQuarantine = 2
38};
39
// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, that is fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was costly as
// well. The header will be atomically loaded and stored.
typedef u64 PackedHeader;
// Bit-field view of the packed header. The field widths below sum to exactly
// 64 bits, which the COMPILER_CHECK against PackedHeader later in this file
// enforces.
struct UnpackedHeader {
  u64 Checksum          : 16; // Header integrity checksum; NOTE(review): the
                              // computation lives in scudo_allocator.cpp.
  u64 SizeOrUnusedBytes : 19; // Size for Primary backed allocations, amount of
                              // unused bytes in the chunk for Secondary ones.
  u64 FromPrimary       : 1;  // 1 if the chunk is backed by the Primary
                              // allocator, 0 if by the Secondary.
  u64 State             : 2;  // available, allocated, or quarantined
  u64 AllocType         : 2;  // malloc, new, new[], or memalign
  u64 Offset            : 16; // Offset from the beginning of the backend
                              // allocation to the beginning of the chunk
                              // itself, in multiples of MinAlignment. See
                              // comment about its maximum value and in init().
  u64 Salt              : 8;  // Random salt; NOTE(review): presumably mixed
                              // into the checksum — confirm in the .cpp.
};
Kostya Kortchinsky71dcc332016-10-26 16:16:58 +000059
// The header is loaded and stored as a single 64-bit atomic word.
typedef atomic_uint64_t AtomicPackedHeader;
// Packed and unpacked representations must be the same size to allow
// conversion between the two.
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
62
// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24; // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;

// Raw header size, and the header size rounded up to the next multiple of
// MinAlignment so that what follows the header stays suitably aligned.
const uptr ChunkHeaderSize = sizeof(PackedHeader);
const uptr AlignedChunkHeaderSize =
    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
72
// Configuration of the Primary (size-class based) backend allocator: the
// 64-bit SizeClassAllocator64 where available, SizeClassAllocator32 otherwise.
#if SANITIZER_CAN_USE_ALLOCATOR64
// NOTE(review): ~0ULL appears to request a dynamically chosen base address —
// confirm against the sanitizer_common primary allocator headers.
const uptr AllocatorSpace = ~0ULL;
# if defined(__aarch64__) && SANITIZER_ANDROID
const uptr AllocatorSize = 0x4000000000ULL;   // 256G.
# elif defined(__aarch64__)
const uptr AllocatorSize = 0x10000000000ULL;  // 1T.
# else
const uptr AllocatorSize = 0x40000000000ULL;  // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
// Allocator parameters for the 64-bit primary.
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;  // No per-chunk metadata; Scudo keeps
                                        // everything in its own header.
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  // Randomly shuffle chunks within a size class to make address prediction
  // harder.
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less toughened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
// The region byte map representation depends on the native word size.
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
typedef DefaultSizeClassMap SizeClassMap;
// Allocator parameters for the 32-bit primary.
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
Kostya Serebryany712fc982016-06-07 01:20:26 +0000118
Kostya Kortchinsky36b34342017-04-27 20:21:16 +0000119#include "scudo_allocator_secondary.h"
Kostya Kortchinsky01a66fc2017-05-11 21:40:45 +0000120#include "scudo_allocator_combined.h"
Kostya Kortchinsky36b34342017-04-27 20:21:16 +0000121
// Per-thread cache in front of the Primary allocator.
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
// Secondary, mmap-based allocator for what the Primary does not serve.
typedef ScudoLargeMmapAllocator SecondaryAllocator;
// Combined front-end exposing Primary, cache and Secondary behind a single
// interface.
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
                               SecondaryAllocator> ScudoBackendAllocator;

// One-time initialization of Scudo; implemented in scudo_allocator.cpp.
void initScudo();
Kostya Serebryany712fc982016-06-07 01:20:26 +0000128
// Entry points implemented in scudo_allocator.cpp and called by the public
// malloc/free/new/delete interceptors. The AllocType parameter records which
// API family the request came from; NOTE(review): mismatch enforcement is
// presumably done in the .cpp — confirm there.
void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
// Free with a caller-provided size (cf. C++14 sized operator delete).
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);
140
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +0000141} // namespace __scudo
Kostya Serebryany712fc982016-06-07 01:20:26 +0000142
143#endif // SCUDO_ALLOCATOR_H_