// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/partition_alloc.h"

#include <string.h>

#include <memory>
#include <type_traits>

#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_oom.h"
#include "third_party/base/allocator/partition_allocator/partition_page.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"

namespace pdfium {
namespace base {

// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple");
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
static_assert(kSystemPageSize * 4 <= kPartitionPageSize,
              "ok partition page size");
static_assert(!(kPartitionPageSize % kSystemPageSize),
              "ok partition page multiple");
static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
              "PartitionPage should not be too big");
static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
              "PartitionBucket should not be too big");
static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <=
                  kPageMetadataSize,
              "PartitionSuperPageExtentEntry should not be too big");
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
                  kSystemPageSize,
              "page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
static_assert(kGenericMaxDirectMapped <=
                  (1UL << 31) + kPageAllocationGranularity,
              "maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
              "System pages per slot span must be less than 256.");
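// For concreteness, with the typical constants used by this allocator (values
// are illustrative and platform-dependent, not guaranteed by this file):
// kSystemPageSize == 4096, kPartitionPageSize == 1 << 14 (four system pages),
// kSuperPageSize == 1 << 21 (128 partition pages) and kPageMetadataSize == 32,
// so the 128 per-partition-page metadata entries (128 * 32 == 4096 bytes) fit
// exactly in one system page, as the "page metadata fits in hole" assertion
// above requires.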

internal::PartitionRootBase::PartitionRootBase() = default;
internal::PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
PartitionRoot::~PartitionRoot() = default;
PartitionRootGeneric::PartitionRootGeneric() = default;
PartitionRootGeneric::~PartitionRootGeneric() = default;
PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default;

subtle::SpinLock* GetLock() {
  static subtle::SpinLock* s_initialized_lock = nullptr;
  if (!s_initialized_lock)
    s_initialized_lock = new subtle::SpinLock();
  return s_initialized_lock;
}

static bool g_initialized = false;

void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
subtle::SpinLock PartitionAllocHooks::set_hooks_lock_;
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
    PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
    PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
    PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
    PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
    PartitionAllocHooks::realloc_override_hook_(nullptr);

void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
                                           FreeObserverHook* free_hook) {
  subtle::SpinLock::Guard guard(set_hooks_lock_);

  // Chained hooks are not supported. Registering a non-null hook when a
  // non-null hook is already registered indicates somebody is trying to
  // overwrite a hook.
  CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
        (!alloc_hook && !free_hook));
  allocation_observer_hook_ = alloc_hook;
  free_observer_hook_ = free_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                           FreeOverrideHook* free_hook,
                                           ReallocOverrideHook realloc_hook) {
  subtle::SpinLock::Guard guard(set_hooks_lock_);

  CHECK((!allocation_override_hook_ && !free_override_hook_ &&
         !realloc_override_hook_) ||
        (!alloc_hook && !free_hook && !realloc_hook));
  allocation_override_hook_ = alloc_hook;
  free_override_hook_ = free_hook;
  realloc_override_hook_ = realloc_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::AllocationObserverHookIfEnabled(
    void* address,
    size_t size,
    const char* type_name) {
  if (AllocationObserverHook* hook =
          allocation_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address, size, type_name);
  }
}

bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
    void** out,
    int flags,
    size_t size,
    const char* type_name) {
  if (AllocationOverrideHook* hook =
          allocation_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, flags, size, type_name);
  }
  return false;
}

void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
  if (FreeObserverHook* hook =
          free_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address);
  }
}

bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
  if (FreeOverrideHook* hook =
          free_override_hook_.load(std::memory_order_relaxed)) {
    return hook(address);
  }
  return false;
}

void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
                                                       void* new_address,
                                                       size_t size,
                                                       const char* type_name) {
  // Report a reallocation as a free followed by an allocation.
  AllocationObserverHook* allocation_hook =
      allocation_observer_hook_.load(std::memory_order_relaxed);
  FreeObserverHook* free_hook =
      free_observer_hook_.load(std::memory_order_relaxed);
  if (allocation_hook && free_hook) {
    free_hook(old_address);
    allocation_hook(new_address, size, type_name);
  }
}

bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
                                                       void* address) {
  if (ReallocOverrideHook* hook =
          realloc_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, address);
  }
  return false;
}

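// Illustrative usage sketch (not part of the upstream file): an embedder that
// wants to trace allocations could register observer hooks once at startup,
// assuming hypothetical callbacks OnPartitionAlloc()/OnPartitionFree() with
// the AllocationObserverHook/FreeObserverHook signatures:
//
//   PartitionAllocHooks::SetObserverHooks(&OnPartitionAlloc, &OnPartitionFree);
//
// Registering a second non-null pair CHECK-fails because chained hooks are not
// supported; passing nullptr for both unregisters the current pair.
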
static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
  DCHECK(!root->initialized);
  {
    subtle::SpinLock::Guard guard(*GetLock());
    if (!g_initialized) {
      g_initialized = true;
      // We mark the sentinel bucket/page as free to make sure it is skipped by
      // our logic to find a new active page.
      internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
          internal::PartitionPage::get_sentinel_page();
    }
  }

  root->initialized = true;

  // This is a "magic" value so we can test if a root pointer is valid.
  root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}

void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
  DCHECK(oom_handling_function);
  internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
}

void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
  PartitionAllocBaseInit(this);

  num_buckets = bucket_count;
  max_allocation = maximum_allocation;
  for (size_t i = 0; i < num_buckets; ++i) {
    internal::PartitionBucket& bucket = buckets()[i];
    bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
  }
}

void PartitionRootGeneric::Init() {
  subtle::SpinLock::Guard guard(lock);

  PartitionAllocBaseInit(this);

  // Precalculate some shift and mask constants used in the hot path.
  // Example: malloc(41) == 101001 binary.
  // Order is 6 (1 << 6-1) == 32 is highest bit set.
  // order_index is the next three MSB == 010 == 2.
  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
  // for the sub_order_index).
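  // Worked example (illustrative; assumes kGenericNumBucketsPerOrderBits == 3,
  // i.e. eight buckets per order, as in the typical configuration):
  //   malloc(41): order == 6, so order_index_shifts[6] == 6 - (3 + 1) == 2 and
  //   order_sub_index_masks[6] == ((1 << 6) - 1) >> 4 == 0b11.
  //   order_index == (41 >> 2) & 0b111 == 0b010 == 2, and
  //   sub_order_index == 41 & 0b11 == 0b01; because it is non-zero, the size
  //   does not land exactly on a bucket boundary and the lookup serves it from
  //   the next larger bucket.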
  size_t order;
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    size_t order_index_shift;
    if (order < kGenericNumBucketsPerOrderBits + 1)
      order_index_shift = 0;
    else
      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
    order_index_shifts[order] = order_index_shift;
    size_t sub_order_index_mask;
    if (order == kBitsPerSizeT) {
      // This avoids invoking undefined behavior for an excessive shift.
      sub_order_index_mask =
          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
    } else {
      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
                             (kGenericNumBucketsPerOrderBits + 1);
    }
    order_sub_index_masks[order] = sub_order_index_mask;
  }

  // Set up the actual usable buckets first.
  // Note that typical values (i.e. min allocation size of 8) will result in
  // pseudo buckets (size==9 etc. or more generally, size is not a multiple
  // of the smallest allocation granularity).
  // We avoid them in the bucket lookup map, but we tolerate them to keep the
  // code simpler and the structures more generic.
  size_t i, j;
  size_t current_size = kGenericSmallestBucket;
  size_t current_increment =
      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
  internal::PartitionBucket* bucket = &buckets[0];
  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      bucket->Init(current_size);
      // Disable pseudo buckets so that touching them faults.
      if (current_size % kGenericSmallestBucket)
        bucket->active_pages_head = nullptr;
      current_size += current_increment;
      ++bucket;
    }
    current_increment <<= 1;
  }
  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);

  // Then set up the fast size -> bucket lookup table.
  bucket = &buckets[0];
  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      if (order < kGenericMinBucketedOrder) {
        // Use the bucket of the finest granularity for malloc(0) etc.
        *bucket_ptr++ = &buckets[0];
      } else if (order > kGenericMaxBucketedOrder) {
        *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
      } else {
        internal::PartitionBucket* valid_bucket = bucket;
        // Skip over invalid buckets.
        while (valid_bucket->slot_size % kGenericSmallestBucket)
          valid_bucket++;
        *bucket_ptr++ = valid_bucket;
        bucket++;
      }
    }
  }
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
  DCHECK(bucket_ptr == &bucket_lookups[0] +
                           ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
  // which tries to overflow to a non-existent order.
  *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket();
}

bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
                                         internal::PartitionPage* page,
                                         size_t raw_size) {
  DCHECK(page->bucket->is_direct_mapped());

  raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);

  // Note that the new size might be a bucketed size; this function is called
  // whenever we're reallocating a direct mapped allocation.
  size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
  if (new_size < kGenericMinDirectMappedDownsize)
    return false;

  // bucket->slot_size is the current size of the allocation.
  size_t current_size = page->bucket->slot_size;
  char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
  if (new_size == current_size) {
    // No need to move any memory around, but update size and cookie below.
  } else if (new_size < current_size) {
    size_t map_size =
        internal::PartitionDirectMapExtent::FromPage(page)->map_size;

    // Don't reallocate in-place if new size is less than 80% of the full
    // map size, to avoid holding on to too much unused address space.
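    // Illustrative numbers (assuming 4 KiB system pages): shrinking a 1 MiB
    // map (256 pages) to 900 KiB (225 pages) keeps roughly 88% of it
    // (225 * 5 >= 256 * 4), so we shrink in place below; shrinking it to
    // 512 KiB (128 pages) fails the check (128 * 5 < 256 * 4) and we bail out
    // so the caller reallocates instead.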
    if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
      return false;

    // Shrink by decommitting unneeded pages and making them inaccessible.
    size_t decommit_size = current_size - new_size;
    root->DecommitSystemPages(char_ptr + new_size, decommit_size);
    SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible);
  } else if (new_size <=
             internal::PartitionDirectMapExtent::FromPage(page)->map_size) {
    // Grow within the actually allocated memory. Just need to make the
    // pages accessible again.
    size_t recommit_size = new_size - current_size;
    SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite);
    root->RecommitSystemPages(char_ptr + current_size, recommit_size);

#if DCHECK_IS_ON()
    memset(char_ptr + current_size, kUninitializedByte, recommit_size);
#endif
  } else {
    // We can't perform the realloc in-place.
    // TODO: support this too when possible.
    return false;
  }

#if DCHECK_IS_ON()
  // Write a new trailing cookie.
  internal::PartitionCookieWriteValue(char_ptr + raw_size -
                                      internal::kCookieSize);
#endif

  page->set_raw_size(raw_size);
  DCHECK(page->get_raw_size() == raw_size);

  page->bucket->slot_size = new_size;
  return true;
}

void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
                                   int flags,
                                   void* ptr,
                                   size_t new_size,
                                   const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
  void* result = realloc(ptr, new_size);
  CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  if (UNLIKELY(!ptr))
    return PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (UNLIKELY(!new_size)) {
    root->Free(ptr);
    return nullptr;
  }

  if (new_size > kGenericMaxDirectMapped) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize();
  }

  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  bool overridden = false;
  size_t actual_old_size;
  if (UNLIKELY(hooks_enabled)) {
    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
        &actual_old_size, ptr);
  }
  if (LIKELY(!overridden)) {
    internal::PartitionPage* page = internal::PartitionPage::FromPointer(
        internal::PartitionCookieFreePointerAdjust(ptr));
    // TODO(palmer): See if we can afford to make this a CHECK.
    DCHECK(root->IsValidPage(page));

    if (UNLIKELY(page->bucket->is_direct_mapped())) {
      // We may be able to perform the realloc in place by changing the
      // accessibility of memory pages and, if reducing the size, decommitting
      // them.
      if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
        if (UNLIKELY(hooks_enabled)) {
          PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
                                                            type_name);
        }
        return ptr;
      }
    }

    const size_t actual_new_size = root->ActualSize(new_size);
    actual_old_size = PartitionAllocGetSize(ptr);

    // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
    // new size is a significant percentage smaller. We could do the same if we
    // determine it is a win.
    if (actual_new_size == actual_old_size) {
      // Trying to allocate a block of size |new_size| would give us a block of
      // the same size as the one we've already got, so re-use the allocation
      // after updating statistics (and cookies, if present).
      page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
      // Write a new trailing cookie when it is possible to keep track of
      // |new_size| via the raw size pointer.
      if (page->get_raw_size_ptr())
        internal::PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
#endif
      return ptr;
    }
  }

  // This realloc cannot be resized in-place. Sadness.
  void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (!ret) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize();
  }

  size_t copy_size = actual_old_size;
  if (new_size < copy_size)
    copy_size = new_size;

  memcpy(ret, ptr, copy_size);
  root->Free(ptr);
  return ret;
#endif
}

void* PartitionRootGeneric::Realloc(void* ptr,
                                    size_t new_size,
                                    const char* type_name) {
  return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
}

void* PartitionRootGeneric::TryRealloc(void* ptr,
                                       size_t new_size,
                                       const char* type_name) {
  return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
                                      new_size, type_name);
}

static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
  const internal::PartitionBucket* bucket = page->bucket;
  size_t slot_size = bucket->slot_size;
  if (slot_size < kSystemPageSize || !page->num_allocated_slots)
    return 0;

  size_t bucket_num_slots = bucket->get_slots_per_span();
  size_t discardable_bytes = 0;

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
    discardable_bytes = bucket->slot_size - used_bytes;
    if (discardable_bytes && discard) {
      char* ptr =
          reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
      ptr += used_bytes;
      DiscardSystemPages(ptr, discardable_bytes);
    }
    return discardable_bytes;
  }

  constexpr size_t kMaxSlotCount =
      (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
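  // With typical constants (illustrative only: 16 KiB partition pages, at most
  // 4 partition pages per slot span, 4 KiB system pages) this works out to
  // (16384 * 4) / 4096 == 16 entries, so the |slot_usage| array below is tiny.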
  DCHECK(bucket_num_slots <= kMaxSlotCount);
  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
  char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
  // The last freelist entry should not be discarded when using OS_WIN.
  // DiscardVirtualMemory makes the contents of discarded memory undefined.
  size_t last_slot = static_cast<size_t>(-1);
#endif
  memset(slot_usage, 1, num_slots);
  char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
  // First, walk the freelist for this page and make a bitmap of which slots
  // are not in use.
  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
       /**/) {
    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
    DCHECK(slot_index < num_slots);
    slot_usage[slot_index] = 0;
    entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
    // If we have a slot where the masked freelist entry is 0, we can actually
    // discard that freelist entry because touching a discarded page is
    // guaranteed to return original content or 0. (Note that this optimization
    // won't fire on big-endian machines because the masking function is
    // negation.)
    if (!internal::PartitionFreelistEntry::Encode(entry))
      last_slot = slot_index;
#endif
  }

  // If the slot(s) at the end of the slot span are not in use, we can truncate
  // them entirely and rewrite the freelist.
  size_t truncated_slots = 0;
  while (!slot_usage[num_slots - 1]) {
    truncated_slots++;
    num_slots--;
    DCHECK(num_slots);
  }
  // First, do the work of calculating the discardable bytes. Don't actually
  // discard anything unless the discard flag was passed in.
  if (truncated_slots) {
    size_t unprovisioned_bytes = 0;
    char* begin_ptr = ptr + (num_slots * slot_size);
    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    // We round the end pointer here up and not down because we're at the end
    // of a slot span, so we "own" all the way up to the page boundary.
    end_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
    if (begin_ptr < end_ptr) {
      unprovisioned_bytes = end_ptr - begin_ptr;
      discardable_bytes += unprovisioned_bytes;
    }
    if (unprovisioned_bytes && discard) {
      DCHECK(truncated_slots > 0);
      size_t num_new_entries = 0;
      page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);

      // Rewrite the freelist.
      internal::PartitionFreelistEntry* head = nullptr;
      internal::PartitionFreelistEntry* back = head;
      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
        if (slot_usage[slot_index])
          continue;

        auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
            ptr + (slot_size * slot_index));
        if (!head) {
          head = entry;
          back = entry;
        } else {
          back->next = internal::PartitionFreelistEntry::Encode(entry);
          back = entry;
        }
        num_new_entries++;
#if !defined(OS_WIN)
        last_slot = slot_index;
#endif
      }

      page->freelist_head = head;
      if (back)
        back->next = internal::PartitionFreelistEntry::Encode(nullptr);

      DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
      // Discard the memory.
      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
    }
  }

  // Next, walk the slots and for any not in use, consider where the system
  // page boundaries occur. We can release any system pages back to the system
  // as long as we don't interfere with a freelist pointer or an adjacent slot.
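  // For example (illustrative, assuming 4 KiB system pages): with 12 KiB slots,
  // an unused slot that starts on a page boundary keeps its first page (it
  // holds the freelist pointer), but the remaining two whole pages (8 KiB) lie
  // between the rounded |begin_ptr| and |end_ptr| below and can be discarded.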
  for (size_t i = 0; i < num_slots; ++i) {
    if (slot_usage[i])
      continue;
    // The first address we can safely discard is just after the freelist
    // pointer. There's one quirk: if the freelist pointer is actually NULL, we
    // can discard that pointer value too.
    char* begin_ptr = ptr + (i * slot_size);
    char* end_ptr = begin_ptr + slot_size;
#if !defined(OS_WIN)
    if (i != last_slot)
      begin_ptr += sizeof(internal::PartitionFreelistEntry);
#else
    begin_ptr += sizeof(internal::PartitionFreelistEntry);
#endif
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    end_ptr = reinterpret_cast<char*>(
        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    if (begin_ptr < end_ptr) {
      size_t partial_slot_bytes = end_ptr - begin_ptr;
      discardable_bytes += partial_slot_bytes;
      if (discard)
        DiscardSystemPages(begin_ptr, partial_slot_bytes);
    }
  }
  return discardable_bytes;
}

static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionPurgePage(page, true);
    }
  }
}

void PartitionRoot::PurgeMemory(int flags) {
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
  // here because that flag is only useful for allocations >= system page size.
  // We only have allocations that large inside generic partitions at the
  // moment.
}

void PartitionRootGeneric::PurgeMemory(int flags) {
  subtle::SpinLock::Guard guard(lock);
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      internal::PartitionBucket* bucket = &buckets[i];
      if (bucket->slot_size >= kSystemPageSize)
        PartitionPurgeBucket(bucket);
    }
  }
}

static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
                                   internal::PartitionPage* page) {
  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();

  if (page->is_decommitted()) {
    ++stats_out->num_decommitted_pages;
    return;
  }

  stats_out->discardable_bytes += PartitionPurgePage(page, false);

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
  } else {
    stats_out->active_bytes +=
        (page->num_allocated_slots * stats_out->bucket_slot_size);
  }

  size_t page_bytes_resident =
      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
                          stats_out->bucket_slot_size);
  stats_out->resident_bytes += page_bytes_resident;
  if (page->is_empty()) {
    stats_out->decommittable_bytes += page_bytes_resident;
    ++stats_out->num_empty_pages;
  } else if (page->is_full()) {
    ++stats_out->num_full_pages;
  } else {
    DCHECK(page->is_active());
    ++stats_out->num_active_pages;
  }
}

static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
                                     const internal::PartitionBucket* bucket) {
  DCHECK(!bucket->is_direct_mapped());
  stats_out->is_valid = false;
  // If the active page list is empty (==
  // internal::PartitionPage::get_sentinel_page()), the bucket might still need
  // to be reported if it has a list of empty, decommitted or full pages.
  if (bucket->active_pages_head ==
          internal::PartitionPage::get_sentinel_page() &&
      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
      !bucket->num_full_pages)
    return;

  memset(stats_out, '\0', sizeof(*stats_out));
  stats_out->is_valid = true;
  stats_out->is_direct_map = false;
  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
  stats_out->bucket_slot_size = bucket->slot_size;
  uint16_t bucket_num_slots = bucket->get_slots_per_span();
  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
  stats_out->allocated_page_size = bucket->get_bytes_per_span();
  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
  stats_out->resident_bytes =
      bucket->num_full_pages * stats_out->allocated_page_size;

  for (internal::PartitionPage* page = bucket->empty_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_empty() || page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }
  for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }

  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionDumpPageStats(stats_out, page);
    }
  }
}

void PartitionRootGeneric::DumpStats(const char* partition_name,
                                     bool is_light_dump,
                                     PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes =
      total_size_of_super_pages + total_size_of_direct_mapped_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;

  size_t direct_mapped_allocations_total_size = 0;

  static const size_t kMaxReportableDirectMaps = 4096;

  // Allocate on the heap rather than on the stack to avoid stack overflow
  // skirmishes (on Windows, in particular).
  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
  if (!is_light_dump) {
    direct_map_lengths =
        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  }

  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
  size_t num_direct_mapped_allocations = 0;
  {
    subtle::SpinLock::Guard guard(lock);

    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      const internal::PartitionBucket* bucket = &buckets[i];
      // Don't report the pseudo buckets that the generic allocator sets up in
      // order to preserve a fast size->bucket map (see
      // PartitionRootGeneric::Init() for details).
      if (!bucket->active_pages_head)
        bucket_stats[i].is_valid = false;
      else
        PartitionDumpBucketStats(&bucket_stats[i], bucket);
      if (bucket_stats[i].is_valid) {
        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
        stats.total_active_bytes += bucket_stats[i].active_bytes;
        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
      }
    }

    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
         extent = extent->next_extent, ++num_direct_mapped_allocations) {
      DCHECK(!extent->next_extent ||
             extent->next_extent->prev_extent == extent);
      size_t slot_size = extent->bucket->slot_size;
      direct_mapped_allocations_total_size += slot_size;
      if (is_light_dump)
        continue;
      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
    }
  }

  if (!is_light_dump) {
    // Call |PartitionsDumpBucketStats| after collecting stats because it can
    // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
    // obtain the lock.
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      if (bucket_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
    }

    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
      uint32_t size = direct_map_lengths[i];

      PartitionBucketMemoryStats mapped_stats = {};
      mapped_stats.is_valid = true;
      mapped_stats.is_direct_map = true;
      mapped_stats.num_full_pages = 1;
      mapped_stats.allocated_page_size = size;
      mapped_stats.bucket_slot_size = size;
      mapped_stats.active_bytes = size;
      mapped_stats.resident_bytes = size;
      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
    }
  }

  stats.total_resident_bytes += direct_mapped_allocations_total_size;
  stats.total_active_bytes += direct_mapped_allocations_total_size;
  dumper->PartitionDumpTotals(partition_name, &stats);
}

void PartitionRoot::DumpStats(const char* partition_name,
                              bool is_light_dump,
                              PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes = total_size_of_super_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;
  DCHECK(!total_size_of_direct_mapped_pages);

  static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
  std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
  if (!is_light_dump) {
    memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
        new PartitionBucketMemoryStats[kMaxReportableBuckets]);
  }

  const size_t partition_num_buckets = num_buckets;
  DCHECK(partition_num_buckets <= kMaxReportableBuckets);

  for (size_t i = 0; i < partition_num_buckets; ++i) {
    PartitionBucketMemoryStats bucket_stats = {0};
    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
    if (bucket_stats.is_valid) {
      stats.total_resident_bytes += bucket_stats.resident_bytes;
      stats.total_active_bytes += bucket_stats.active_bytes;
      stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
      stats.total_discardable_bytes += bucket_stats.discardable_bytes;
    }
    if (!is_light_dump) {
      if (bucket_stats.is_valid)
        memory_stats[i] = bucket_stats;
      else
        memory_stats[i].is_valid = false;
    }
  }
  if (!is_light_dump) {
    // PartitionsDumpBucketStats is called after collecting stats because it
    // can use PartitionRoot::Alloc() to allocate and this can affect the
    // statistics.
    for (size_t i = 0; i < partition_num_buckets; ++i) {
      if (memory_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
    }
  }
  dumper->PartitionDumpTotals(partition_name, &stats);
}

}  // namespace base
}  // namespace pdfium