// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/base/allocator/partition_allocator/partition_alloc.h"

#include <string.h>

#include <memory>
#include <type_traits>

#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_oom.h"
#include "third_party/base/allocator/partition_allocator/partition_page.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"

namespace pdfium {
namespace base {

// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple");
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
static_assert(kSystemPageSize * 4 <= kPartitionPageSize,
              "ok partition page size");
static_assert(!(kPartitionPageSize % kSystemPageSize),
              "ok partition page multiple");
static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
              "PartitionPage should not be too big");
static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
              "PartitionBucket should not be too big");
static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <=
                  kPageMetadataSize,
              "PartitionSuperPageExtentEntry should not be too big");
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
                  kSystemPageSize,
              "page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
static_assert(kGenericMaxDirectMapped <=
                  (1UL << 31) + kPageAllocationGranularity,
              "maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
              "System pages per slot span must be less than 256.");

internal::PartitionRootBase::PartitionRootBase() = default;
internal::PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
PartitionRoot::~PartitionRoot() = default;
PartitionRootGeneric::PartitionRootGeneric() = default;
PartitionRootGeneric::~PartitionRootGeneric() = default;
PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default;

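// Returns the lock guarding the one-time global initialization below. The
// SpinLock is lazily created and intentionally leaked so it stays valid for
// the life of the process; the first call is presumed to happen before any
// concurrent use.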
subtle::SpinLock* GetLock() {
  static subtle::SpinLock* s_initialized_lock = nullptr;
  if (!s_initialized_lock)
    s_initialized_lock = new subtle::SpinLock();
  return s_initialized_lock;
}

static bool g_initialized = false;

void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
subtle::SpinLock PartitionAllocHooks::set_hooks_lock_;
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
    PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
    PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
    PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
    PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
    PartitionAllocHooks::realloc_override_hook_(nullptr);

void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
                                           FreeObserverHook* free_hook) {
  subtle::SpinLock::Guard guard(set_hooks_lock_);

  // Chained hooks are not supported. Registering a non-null hook when a
  // non-null hook is already registered indicates somebody is trying to
  // overwrite a hook.
  CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
        (!alloc_hook && !free_hook));
  allocation_observer_hook_ = alloc_hook;
  free_observer_hook_ = free_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                           FreeOverrideHook* free_hook,
                                           ReallocOverrideHook realloc_hook) {
  subtle::SpinLock::Guard guard(set_hooks_lock_);

  CHECK((!allocation_override_hook_ && !free_override_hook_ &&
         !realloc_override_hook_) ||
        (!alloc_hook && !free_hook && !realloc_hook));
  allocation_override_hook_ = alloc_hook;
  free_override_hook_ = free_hook;
  realloc_override_hook_ = realloc_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::AllocationObserverHookIfEnabled(
    void* address,
    size_t size,
    const char* type_name) {
  if (AllocationObserverHook* hook =
          allocation_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address, size, type_name);
  }
}

bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
    void** out,
    int flags,
    size_t size,
    const char* type_name) {
  if (AllocationOverrideHook* hook =
          allocation_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, flags, size, type_name);
  }
  return false;
}

void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
  if (FreeObserverHook* hook =
          free_observer_hook_.load(std::memory_order_relaxed)) {
    hook(address);
  }
}

bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
  if (FreeOverrideHook* hook =
          free_override_hook_.load(std::memory_order_relaxed)) {
    return hook(address);
  }
  return false;
}

void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
                                                       void* new_address,
                                                       size_t size,
                                                       const char* type_name) {
  // Report a reallocation as a free followed by an allocation.
  AllocationObserverHook* allocation_hook =
      allocation_observer_hook_.load(std::memory_order_relaxed);
  FreeObserverHook* free_hook =
      free_observer_hook_.load(std::memory_order_relaxed);
  if (allocation_hook && free_hook) {
    free_hook(old_address);
    allocation_hook(new_address, size, type_name);
  }
}

bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
                                                       void* address) {
  if (ReallocOverrideHook* hook =
          realloc_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, address);
  }
  return false;
}

static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
  DCHECK(!root->initialized);
  {
    subtle::SpinLock::Guard guard(*GetLock());
    if (!g_initialized) {
      g_initialized = true;
      // We mark the sentinel bucket/page as free to make sure it is skipped by
      // our logic to find a new active page.
      internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
          internal::PartitionPage::get_sentinel_page();
    }
  }

  root->initialized = true;

  // This is a "magic" value so we can test if a root pointer is valid.
  root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
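  // (Validity checks elsewhere, e.g. the IsValidPage() DCHECK in the realloc
  // path below, can then presumably recompute ~root and compare it against
  // inverted_self.)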
}

void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
  DCHECK(oom_handling_function);
  internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
}

void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
  PartitionAllocBaseInit(this);

  num_buckets = bucket_count;
  max_allocation = maximum_allocation;
  for (size_t i = 0; i < num_buckets; ++i) {
    internal::PartitionBucket& bucket = buckets()[i];
    bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
  }
}

void PartitionRootGeneric::Init() {
  subtle::SpinLock::Guard guard(lock);

  PartitionAllocBaseInit(this);

  // Precalculate some shift and mask constants used in the hot path.
  // Example: malloc(41) == 101001 binary.
  // Order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
  // order_index is the next three MSB == 010 == 2.
  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to
  // 01 for the sub_order_index).
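  // (A sketch of the arithmetic below, assuming the usual
  // kGenericNumBucketsPerOrderBits == 3: order 6 gives order_index_shifts[6]
  // == 6 - (3 + 1) == 2, so 41 >> 2 == 0b1010 and its low three bits give
  // order_index == 2; order_sub_index_masks[6] == ((1 << 6) - 1) >> 4 ==
  // 0b11, and 41 & 0b11 == 1, i.e. a non-zero sub_order_index, so the lookup
  // in the hot path presumably rounds the request up to the next bucket.)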
  size_t order;
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    size_t order_index_shift;
    if (order < kGenericNumBucketsPerOrderBits + 1)
      order_index_shift = 0;
    else
      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
    order_index_shifts[order] = order_index_shift;
    size_t sub_order_index_mask;
    if (order == kBitsPerSizeT) {
      // This avoids invoking undefined behavior for an excessive shift.
      sub_order_index_mask =
          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
    } else {
      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
                             (kGenericNumBucketsPerOrderBits + 1);
    }
    order_sub_index_masks[order] = sub_order_index_mask;
  }

  // Set up the actual usable buckets first.
  // Note that typical values (i.e. min allocation size of 8) will result in
  // pseudo buckets (size==9 etc. or more generally, size is not a multiple
  // of the smallest allocation granularity).
  // We avoid them in the bucket lookup map, but we tolerate them to keep the
  // code simpler and the structures more generic.
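  // (For example, with kGenericSmallestBucket == 8 the first bucketed order
  // nominally holds buckets 8, 9, ..., 15; the non-multiples of 8 are the
  // pseudo buckets disabled below, and the lookup table built afterwards maps
  // their entries forward, so the nominal 9-byte entry ends up pointing at
  // the 16-byte bucket.)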
  size_t i, j;
  size_t current_size = kGenericSmallestBucket;
  size_t current_increment =
      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
  internal::PartitionBucket* bucket = &buckets[0];
  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      bucket->Init(current_size);
      // Disable pseudo buckets so that touching them faults.
      if (current_size % kGenericSmallestBucket)
        bucket->active_pages_head = nullptr;
      current_size += current_increment;
      ++bucket;
    }
    current_increment <<= 1;
  }
  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);

  // Then set up the fast size -> bucket lookup table.
  bucket = &buckets[0];
  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
  for (order = 0; order <= kBitsPerSizeT; ++order) {
    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
      if (order < kGenericMinBucketedOrder) {
        // Use the bucket of the finest granularity for malloc(0) etc.
        *bucket_ptr++ = &buckets[0];
      } else if (order > kGenericMaxBucketedOrder) {
        *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
      } else {
        internal::PartitionBucket* valid_bucket = bucket;
        // Skip over invalid buckets.
        while (valid_bucket->slot_size % kGenericSmallestBucket)
          valid_bucket++;
        *bucket_ptr++ = valid_bucket;
        bucket++;
      }
    }
  }
  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
  DCHECK(bucket_ptr == &bucket_lookups[0] +
                           ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
  // which tries to overflow to a non-existent order.
  *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket();
}

bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
                                         internal::PartitionPage* page,
                                         size_t raw_size) {
  DCHECK(page->bucket->is_direct_mapped());

  raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);

  // Note that the new size might be a bucketed size; this function is called
  // whenever we're reallocating a direct mapped allocation.
  size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
  if (new_size < kGenericMinDirectMappedDownsize)
    return false;

  // bucket->slot_size is the current size of the allocation.
  size_t current_size = page->bucket->slot_size;
  char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
  if (new_size == current_size) {
    // No need to move any memory around, but update size and cookie below.
  } else if (new_size < current_size) {
    size_t map_size =
        internal::PartitionDirectMapExtent::FromPage(page)->map_size;

    // Don't reallocate in-place if new size is less than 80% of the full
    // map size, to avoid holding on to too much unused address space.
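    // (For example, shrinking a 10-system-page mapping to 7 pages gives
    // 7 * 5 == 35 < 10 * 4 == 40, which is below the 80% threshold, so we
    // bail out and let the caller fall back to an out-of-place realloc.)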
    if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
      return false;

    // Shrink by decommitting unneeded pages and making them inaccessible.
    size_t decommit_size = current_size - new_size;
    root->DecommitSystemPages(char_ptr + new_size, decommit_size);
    SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible);
  } else if (new_size <=
             internal::PartitionDirectMapExtent::FromPage(page)->map_size) {
    // Grow within the actually allocated memory. Just need to make the
    // pages accessible again.
    size_t recommit_size = new_size - current_size;
    SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite);
    root->RecommitSystemPages(char_ptr + current_size, recommit_size);

#if DCHECK_IS_ON()
    memset(char_ptr + current_size, kUninitializedByte, recommit_size);
#endif
  } else {
    // We can't perform the realloc in-place.
    // TODO: support this too when possible.
    return false;
  }

#if DCHECK_IS_ON()
  // Write a new trailing cookie.
  internal::PartitionCookieWriteValue(char_ptr + raw_size -
                                      internal::kCookieSize);
#endif

  page->set_raw_size(raw_size);
  DCHECK(page->get_raw_size() == raw_size);

  page->bucket->slot_size = new_size;
  return true;
}

void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
                                   int flags,
                                   void* ptr,
                                   size_t new_size,
                                   const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
  void* result = realloc(ptr, new_size);
  CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  if (UNLIKELY(!ptr))
    return PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (UNLIKELY(!new_size)) {
    root->Free(ptr);
    return nullptr;
  }

  if (new_size > kGenericMaxDirectMapped) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize();
  }

  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  bool overridden = false;
  size_t actual_old_size;
  if (UNLIKELY(hooks_enabled)) {
    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
        &actual_old_size, ptr);
  }
  if (LIKELY(!overridden)) {
    internal::PartitionPage* page = internal::PartitionPage::FromPointer(
        internal::PartitionCookieFreePointerAdjust(ptr));
    // TODO(palmer): See if we can afford to make this a CHECK.
    DCHECK(root->IsValidPage(page));

    if (UNLIKELY(page->bucket->is_direct_mapped())) {
      // We may be able to perform the realloc in place by changing the
      // accessibility of memory pages and, if reducing the size, decommitting
      // them.
      if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
        if (UNLIKELY(hooks_enabled)) {
          PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
                                                            type_name);
        }
        return ptr;
      }
    }

    const size_t actual_new_size = root->ActualSize(new_size);
    actual_old_size = PartitionAllocGetSize(ptr);

    // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
    // new size is a significant percentage smaller. We could do the same if we
    // determine it is a win.
    if (actual_new_size == actual_old_size) {
      // Trying to allocate a block of size |new_size| would give us a block of
      // the same size as the one we've already got, so re-use the allocation
      // after updating statistics (and cookies, if present).
      page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
      // Write a new trailing cookie when it is possible to keep track of
      // |new_size| via the raw size pointer.
      if (page->get_raw_size_ptr())
        internal::PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
#endif
      return ptr;
    }
  }

  // This realloc cannot be resized in-place. Sadness.
  void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
  if (!ret) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize();
  }

  size_t copy_size = actual_old_size;
  if (new_size < copy_size)
    copy_size = new_size;

  memcpy(ret, ptr, copy_size);
  root->Free(ptr);
  return ret;
#endif
}

void* PartitionRootGeneric::Realloc(void* ptr,
                                    size_t new_size,
                                    const char* type_name) {
  return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
}

void* PartitionRootGeneric::TryRealloc(void* ptr,
                                       size_t new_size,
                                       const char* type_name) {
  return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
                                      new_size, type_name);
}

static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
  const internal::PartitionBucket* bucket = page->bucket;
  size_t slot_size = bucket->slot_size;
  if (slot_size < kSystemPageSize || !page->num_allocated_slots)
    return 0;

  size_t bucket_num_slots = bucket->get_slots_per_span();
  size_t discardable_bytes = 0;

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
    discardable_bytes = bucket->slot_size - used_bytes;
    if (discardable_bytes && discard) {
      char* ptr =
          reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
      ptr += used_bytes;
      DiscardSystemPages(ptr, discardable_bytes);
    }
    return discardable_bytes;
  }

  constexpr size_t kMaxSlotCount =
      (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
  DCHECK(bucket_num_slots <= kMaxSlotCount);
  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
  char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
  // The last freelist entry should not be discarded when using OS_WIN.
  // DiscardVirtualMemory makes the contents of discarded memory undefined.
  size_t last_slot = static_cast<size_t>(-1);
#endif
  memset(slot_usage, 1, num_slots);
  char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
  // First, walk the freelist for this page and make a bitmap of which slots
  // are not in use.
  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
       /**/) {
    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
    DCHECK(slot_index < num_slots);
    slot_usage[slot_index] = 0;
    entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
    // If we have a slot where the masked freelist entry is 0, we can actually
    // discard that freelist entry because touching a discarded page is
    // guaranteed to return original content or 0. (Note that this optimization
    // won't fire on big-endian machines because the masking function is
    // negation.)
    if (!internal::PartitionFreelistEntry::Encode(entry))
      last_slot = slot_index;
#endif
  }

  // If the slot(s) at the end of the slot span are not in use, we can truncate
  // them entirely and rewrite the freelist.
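  // (E.g., if the last three slots of a span are free, they are dropped from
  // num_slots here, counted as unprovisioned, and the freelist is rebuilt
  // below without them so that their pages can be discarded.)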
  size_t truncated_slots = 0;
  while (!slot_usage[num_slots - 1]) {
    truncated_slots++;
    num_slots--;
    DCHECK(num_slots);
  }
  // First, do the work of calculating the discardable bytes. Don't actually
  // discard anything unless the discard flag was passed in.
  if (truncated_slots) {
    size_t unprovisioned_bytes = 0;
    char* begin_ptr = ptr + (num_slots * slot_size);
    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    // We round the end pointer here up and not down because we're at the end
    // of a slot span, so we "own" all the way up to the page boundary.
    end_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
    if (begin_ptr < end_ptr) {
      unprovisioned_bytes = end_ptr - begin_ptr;
      discardable_bytes += unprovisioned_bytes;
    }
    if (unprovisioned_bytes && discard) {
      DCHECK(truncated_slots > 0);
      size_t num_new_entries = 0;
      page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);

      // Rewrite the freelist.
      internal::PartitionFreelistEntry* head = nullptr;
      internal::PartitionFreelistEntry* back = head;
      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
        if (slot_usage[slot_index])
          continue;

        auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
            ptr + (slot_size * slot_index));
        if (!head) {
          head = entry;
          back = entry;
        } else {
          back->next = internal::PartitionFreelistEntry::Encode(entry);
          back = entry;
        }
        num_new_entries++;
#if !defined(OS_WIN)
        last_slot = slot_index;
#endif
      }

      page->freelist_head = head;
      if (back)
        back->next = internal::PartitionFreelistEntry::Encode(nullptr);

      DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
      // Discard the memory.
      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
    }
  }

  // Next, walk the slots and for any not in use, consider where the system
  // page boundaries occur. We can release any system pages back to the system
  // as long as we don't interfere with a freelist pointer or an adjacent slot.
  for (size_t i = 0; i < num_slots; ++i) {
    if (slot_usage[i])
      continue;
    // The first address we can safely discard is just after the freelist
    // pointer. There's one quirk: if the freelist pointer is actually NULL, we
    // can discard that pointer value too.
    char* begin_ptr = ptr + (i * slot_size);
    char* end_ptr = begin_ptr + slot_size;
#if !defined(OS_WIN)
    if (i != last_slot)
      begin_ptr += sizeof(internal::PartitionFreelistEntry);
#else
    begin_ptr += sizeof(internal::PartitionFreelistEntry);
#endif
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    end_ptr = reinterpret_cast<char*>(
        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    if (begin_ptr < end_ptr) {
      size_t partial_slot_bytes = end_ptr - begin_ptr;
      discardable_bytes += partial_slot_bytes;
      if (discard)
        DiscardSystemPages(begin_ptr, partial_slot_bytes);
    }
  }
  return discardable_bytes;
}

static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionPurgePage(page, true);
    }
  }
}

void PartitionRoot::PurgeMemory(int flags) {
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
  // here because that flag is only useful for allocations >= system page size.
  // We only have allocations that large inside generic partitions at the
  // moment.
}

void PartitionRootGeneric::PurgeMemory(int flags) {
  subtle::SpinLock::Guard guard(lock);
  if (flags & PartitionPurgeDecommitEmptyPages)
    DecommitEmptyPages();
  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      internal::PartitionBucket* bucket = &buckets[i];
      if (bucket->slot_size >= kSystemPageSize)
        PartitionPurgeBucket(bucket);
    }
  }
}

static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
                                   internal::PartitionPage* page) {
  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();

  if (page->is_decommitted()) {
    ++stats_out->num_decommitted_pages;
    return;
  }

  stats_out->discardable_bytes += PartitionPurgePage(page, false);

  size_t raw_size = page->get_raw_size();
  if (raw_size) {
    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
  } else {
    stats_out->active_bytes +=
        (page->num_allocated_slots * stats_out->bucket_slot_size);
  }

  size_t page_bytes_resident =
      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
                          stats_out->bucket_slot_size);
  stats_out->resident_bytes += page_bytes_resident;
  if (page->is_empty()) {
    stats_out->decommittable_bytes += page_bytes_resident;
    ++stats_out->num_empty_pages;
  } else if (page->is_full()) {
    ++stats_out->num_full_pages;
  } else {
    DCHECK(page->is_active());
    ++stats_out->num_active_pages;
  }
}

static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
                                     const internal::PartitionBucket* bucket) {
  DCHECK(!bucket->is_direct_mapped());
  stats_out->is_valid = false;
  // If the active page list is empty (==
  // internal::PartitionPage::get_sentinel_page()), the bucket might still need
  // to be reported if it has a list of empty, decommitted or full pages.
  if (bucket->active_pages_head ==
          internal::PartitionPage::get_sentinel_page() &&
      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
      !bucket->num_full_pages)
    return;

  memset(stats_out, '\0', sizeof(*stats_out));
  stats_out->is_valid = true;
  stats_out->is_direct_map = false;
  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
  stats_out->bucket_slot_size = bucket->slot_size;
  uint16_t bucket_num_slots = bucket->get_slots_per_span();
  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
  stats_out->allocated_page_size = bucket->get_bytes_per_span();
  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
  stats_out->resident_bytes =
      bucket->num_full_pages * stats_out->allocated_page_size;

  for (internal::PartitionPage* page = bucket->empty_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_empty() || page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }
  for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
       page = page->next_page) {
    DCHECK(page->is_decommitted());
    PartitionDumpPageStats(stats_out, page);
  }

  if (bucket->active_pages_head !=
      internal::PartitionPage::get_sentinel_page()) {
    for (internal::PartitionPage* page = bucket->active_pages_head; page;
         page = page->next_page) {
      DCHECK(page != internal::PartitionPage::get_sentinel_page());
      PartitionDumpPageStats(stats_out, page);
    }
  }
}

void PartitionRootGeneric::DumpStats(const char* partition_name,
                                     bool is_light_dump,
                                     PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes =
      total_size_of_super_pages + total_size_of_direct_mapped_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;

  size_t direct_mapped_allocations_total_size = 0;

  static const size_t kMaxReportableDirectMaps = 4096;

  // Allocate on the heap rather than on the stack to avoid stack overflow
  // skirmishes (on Windows, in particular).
  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
  if (!is_light_dump) {
    direct_map_lengths =
        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  }

  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
  size_t num_direct_mapped_allocations = 0;
  {
    subtle::SpinLock::Guard guard(lock);

    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      const internal::PartitionBucket* bucket = &buckets[i];
      // Don't report the pseudo buckets that the generic allocator sets up in
      // order to preserve a fast size->bucket map (see
      // PartitionRootGeneric::Init() for details).
      if (!bucket->active_pages_head)
        bucket_stats[i].is_valid = false;
      else
        PartitionDumpBucketStats(&bucket_stats[i], bucket);
      if (bucket_stats[i].is_valid) {
        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
        stats.total_active_bytes += bucket_stats[i].active_bytes;
        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
      }
    }

    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
         extent = extent->next_extent, ++num_direct_mapped_allocations) {
      DCHECK(!extent->next_extent ||
             extent->next_extent->prev_extent == extent);
      size_t slot_size = extent->bucket->slot_size;
      direct_mapped_allocations_total_size += slot_size;
      if (is_light_dump)
        continue;
      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
    }
  }

  if (!is_light_dump) {
    // Call |PartitionsDumpBucketStats| after collecting stats because it can
    // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
    // obtain the lock.
    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
      if (bucket_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
    }

    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
      uint32_t size = direct_map_lengths[i];

      PartitionBucketMemoryStats mapped_stats = {};
      mapped_stats.is_valid = true;
      mapped_stats.is_direct_map = true;
      mapped_stats.num_full_pages = 1;
      mapped_stats.allocated_page_size = size;
      mapped_stats.bucket_slot_size = size;
      mapped_stats.active_bytes = size;
      mapped_stats.resident_bytes = size;
      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
    }
  }

  stats.total_resident_bytes += direct_mapped_allocations_total_size;
  stats.total_active_bytes += direct_mapped_allocations_total_size;
  dumper->PartitionDumpTotals(partition_name, &stats);
}

void PartitionRoot::DumpStats(const char* partition_name,
                              bool is_light_dump,
                              PartitionStatsDumper* dumper) {
  PartitionMemoryStats stats = {0};
  stats.total_mmapped_bytes = total_size_of_super_pages;
  stats.total_committed_bytes = total_size_of_committed_pages;
  DCHECK(!total_size_of_direct_mapped_pages);

  static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
  std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
  if (!is_light_dump) {
    memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
        new PartitionBucketMemoryStats[kMaxReportableBuckets]);
  }

  const size_t partition_num_buckets = num_buckets;
  DCHECK(partition_num_buckets <= kMaxReportableBuckets);

  for (size_t i = 0; i < partition_num_buckets; ++i) {
    PartitionBucketMemoryStats bucket_stats = {0};
    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
    if (bucket_stats.is_valid) {
      stats.total_resident_bytes += bucket_stats.resident_bytes;
      stats.total_active_bytes += bucket_stats.active_bytes;
      stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
      stats.total_discardable_bytes += bucket_stats.discardable_bytes;
    }
    if (!is_light_dump) {
      if (bucket_stats.is_valid)
        memory_stats[i] = bucket_stats;
      else
        memory_stats[i].is_valid = false;
    }
  }
  if (!is_light_dump) {
    // PartitionsDumpBucketStats is called after collecting stats because it
    // can use PartitionRoot::Alloc() to allocate and this can affect the
    // statistics.
    for (size_t i = 0; i < partition_num_buckets; ++i) {
      if (memory_stats[i].is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
    }
  }
  dumper->PartitionDumpTotals(partition_name, &stats);
}

}  // namespace base
}  // namespace pdfium