//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Header for scudo_allocator.cpp.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_H_
#define SCUDO_ALLOCATOR_H_

#include "scudo_platform.h"

namespace __scudo {

enum AllocType : u8 {
  FromMalloc = 0,    // Memory block came from malloc, realloc, calloc, etc.
  FromNew = 1,       // Memory block came from operator new.
  FromNewArray = 2,  // Memory block came from operator new [].
  FromMemalign = 3,  // Memory block came from memalign, posix_memalign, etc.
};

enum ChunkState : u8 {
  ChunkAvailable  = 0,
  ChunkAllocated  = 1,
  ChunkQuarantine = 2
};

// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, which is fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was costly as
// well. The header is atomically loaded and stored (see the sketch following
// the COMPILER_CHECK below).
typedef u64 PackedHeader;
struct UnpackedHeader {
  u64 Checksum          : 16;
  u64 ClassId           : 8;
  u64 SizeOrUnusedBytes : 20;  // Size for Primary backed allocations, amount of
                               // unused bytes in the chunk for Secondary ones.
  u64 State             : 2;   // available, allocated, or quarantined
  u64 AllocType         : 2;   // malloc, new, new[], or memalign
  u64 Offset            : 16;  // Offset from the beginning of the backend
                               // allocation to the beginning of the chunk
                               // itself, in multiples of MinAlignment. See the
                               // comment in init() about its maximum value.
};

typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
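
// A minimal sketch of the intended access pattern (the real helpers live in
// scudo_allocator.cpp; the function name below is illustrative only): the
// whole header is read or written as a single 64-bit word, e.g.
//   UnpackedHeader loadChunkHeader(const AtomicPackedHeader *AtomicHeader) {
//     const PackedHeader Packed = atomic_load_relaxed(AtomicHeader);
//     return bit_cast<UnpackedHeader>(Packed);  // Reinterpret the raw bits.
//   }
// Stores go the other way: pack the bit-fields into a PackedHeader and
// atomically store it, so readers never observe a partially updated header.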

// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24;  // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;

// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
// This way we can use it in constexpr variables and function declarations.
constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

namespace Chunk {
  constexpr uptr getHeaderSize() {
    return RoundUpTo(sizeof(PackedHeader), MinAlignment);
  }
}
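
// Worked example, assuming the defaults above: sizeof(PackedHeader) is 8, so
// on 64-bit targets (MinAlignment == 16) getHeaderSize() rounds 8 up to 16
// bytes, while on 32-bit targets (MinAlignment == 8) the header occupies
// exactly 8 bytes.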

#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

#include "scudo_allocator_secondary.h"
#include "scudo_allocator_combined.h"

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> ScudoBackendAllocator;

void initScudo();

void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);

}  // namespace __scudo

#endif  // SCUDO_ALLOCATOR_H_