blob: 2f317d24a53325a93bef3ef5bc8ffa702d1c5100 [file] [log] [blame]
Kostya Serebryany712fc982016-06-07 01:20:26 +00001//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// Header for scudo_allocator.cpp.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef SCUDO_ALLOCATOR_H_
15#define SCUDO_ALLOCATOR_H_
16
Kostya Kortchinskyb59abb22017-09-26 17:20:02 +000017#include "scudo_platform.h"
Kostya Kortchinskyb39dff42017-01-18 17:11:17 +000018
Kostya Serebryany712fc982016-06-07 01:20:26 +000019namespace __scudo {
20
// Identifies which allocation API family a chunk came from. Stored in the
// chunk header (see UnpackedHeader::AllocType); presumably used to detect
// mismatched alloc/dealloc pairs -- confirm in scudo_allocator.cpp.
enum AllocType : u8 {
  FromMalloc = 0,   // Memory block came from malloc, realloc, calloc, etc.
  FromNew = 1,      // Memory block came from operator new.
  FromNewArray = 2, // Memory block came from operator new [].
  FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc.
};
27
// Lifecycle state of a chunk, stored in the chunk header (see
// UnpackedHeader::State).
enum ChunkState : u8 {
  ChunkAvailable = 0,  // Not currently handed out to the user.
  ChunkAllocated = 1,  // Live allocation owned by the user.
  ChunkQuarantine = 2  // Freed, but held in quarantine before being recycled.
};
33
// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, which is fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was costly as
// well. The header will be atomically loaded and stored.
typedef u64 PackedHeader;
// Bitfield view of the packed header. The fields sum to exactly 64 bits,
// which the COMPILER_CHECK below enforces against PackedHeader.
struct UnpackedHeader {
  u64 Checksum : 16;          // Guards the header against corruption or
                              // forgery; NOTE(review): presumably computed
                              // over the other fields (and salted) -- confirm
                              // in scudo_allocator.cpp.
  u64 SizeOrUnusedBytes : 19; // Size for Primary backed allocations, amount of
                              // unused bytes in the chunk for Secondary ones.
  u64 FromPrimary : 1;        // Whether the chunk is backed by the Primary
                              // (size-class) allocator or the Secondary one.
  u64 State : 2;              // available, allocated, or quarantined
  u64 AllocType : 2;          // malloc, new, new[], or memalign
  u64 Offset : 16;            // Offset from the beginning of the backend
                              // allocation to the beginning of the chunk
                              // itself, in multiples of MinAlignment. See
                              // comment about its maximum value and in init().
  u64 Salt : 8;               // Random value; NOTE(review): presumably feeds
                              // the checksum -- confirm.
};

// The packed header is loaded and stored atomically, hence the atomic u64.
typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
56
// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24; // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;

// Size of the packed header, and that size rounded up to the next multiple
// of MinAlignment (MinAlignment is a power of two, so the mask trick below
// is exact).
const uptr ChunkHeaderSize = sizeof(PackedHeader);
const uptr AlignedChunkHeaderSize =
    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
66
#if SANITIZER_CAN_USE_ALLOCATOR64
// Parameters for the 64-bit primary (size-class) allocator.
// NOTE(review): a kSpaceBeg of ~0ULL appears to ask the backend to choose
// the base address dynamically -- confirm against sanitizer_allocator.
const uptr AllocatorSpace = ~0ULL;
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0; // No backend metadata (header is
                                       // stored in the chunk itself).
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  // Shuffle chunks within a size class to reduce heap layout predictability.
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000079#else
80// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
81// security improvements brought to the 64-bit one. This makes the 32-bit
82// version of Scudo slightly less toughened.
// Parameters for the 32-bit primary (size-class) allocator.
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
// Per-region bookkeeping map: flat on 32-bit hosts, two-level on 64-bit
// hosts where NumRegions would make a flat array too large.
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0; // No backend metadata (header is
                                       // stored in the chunk itself).
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  // Shuffle chunks, and keep TransferBatches in their own size class so
  // they do not intermix with user chunks.
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
Kostya Kortchinsky36b34342017-04-27 20:21:16 +0000102#endif // SANITIZER_CAN_USE_ALLOCATOR64
Kostya Serebryany712fc982016-06-07 01:20:26 +0000103
Kostya Kortchinsky0ce49992017-06-29 16:45:20 +0000104// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
105INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
106 return (Size + Boundary - 1) & ~(Boundary - 1);
107}
108
Kostya Kortchinsky36b34342017-04-27 20:21:16 +0000109#include "scudo_allocator_secondary.h"
Kostya Kortchinsky01a66fc2017-05-11 21:40:45 +0000110#include "scudo_allocator_combined.h"
Kostya Kortchinsky36b34342017-04-27 20:21:16 +0000111
// Local cache fronting the primary allocator.
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
// Combined backend: primary for size-class'd allocations, secondary for
// large ones (see scudo_allocator_combined.h).
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
                               SecondaryAllocator> ScudoBackendAllocator;

// Initializes the allocator; implemented in scudo_allocator.cpp.
void initScudo();

// C allocation API entry points, implemented in scudo_allocator.cpp.
// AllocType records which API family performed the (de)allocation.
void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);
130
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +0000131} // namespace __scudo
Kostya Serebryany712fc982016-06-07 01:20:26 +0000132
133#endif // SCUDO_ALLOCATOR_H_