// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <memory>

#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"

namespace base {

namespace {

// Name of histogram for storing results of local operations.
const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first. An AtomicWord is used instead
// of std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}
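
// A minimal validation sketch (illustrative only): the boundaries must be
// strictly increasing and the stored checksum must match one recomputed
// from the data. Assuming a caller with a valid four-entry array:
//
//   HistogramBase::Sample data[] = {1, 5, 10, 20};
//   BucketRanges expected(4);
//   for (size_t i = 0; i < 4; ++i)
//     expected.set_range(i, data[i]);
//   expected.ResetChecksum();
//   std::unique_ptr<BucketRanges> ok =
//       CreateRangesFromData(data, expected.checksum(), 4);  // Non-null.
//   // Passing {1, 5, 5, 20} instead would return nullptr.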

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
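
// A quick worked example, assuming 4-byte atomic counts: a histogram with
// 50 buckets needs 50 * 2 * 4 = 400 bytes, one "sample count" plus one
// backup "logged count" per bucket. The guard above rejects any
// |bucket_count| for which the multiplication would overflow size_t.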

}  // namespace

const Feature kPersistentHistogramsFeature{
    "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};


PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  lock_.AssertAcquired();

  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}


PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return 0;
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}
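
// A minimal usage sketch for the acquire/iterate/release cycle above,
// assuming a PersistentSampleMap |map| that owns the records for its
// histogram ID (names are illustrative only):
//
//   PersistentSampleMapRecords* records =
//       allocator->UseSampleMapRecords(map_id, &map);  // Calls Acquire().
//   PersistentMemoryAllocator::Reference ref;
//   while ((ref = records->GetNext()) != 0) {
//     // Resolve |ref| to a sample record and import its value/count.
//   }
//   records->Release(&map);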


// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};
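
// Because |name| is the variable-length tail of the structure, the size
// requested from the allocator is computed from its offset, as in
// AllocateHistogram() below:
//
//   size_t alloc_size =
//       offsetof(PersistentHistogramData, name) + name.length() + 1;
//
// so a histogram named "Foo" needs the fixed header plus four bytes for
// the NUL-terminated name (possibly rounded up by allocator alignment).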

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}


PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
    NOTREACHED();
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
    return nullptr;
  }

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.length() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      NOTREACHED();
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          NOTREACHED();
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  CreateHistogramResultType result;
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
  } else if (memory_allocator_->IsFull()) {
    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
  } else {
    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
  }
  RecordCreateHistogramResult(result);

  // Crash for failures caused by internal bugs but not "full" which is
  // dependent on outside code.
  if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
    NOTREACHED() << memory_allocator_->Name() << ", error=" << result;

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}
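
// A caller sketch for the allocate/register/finalize sequence, loosely
// modeled on how factory methods use it (names are illustrative only):
//
//   Reference ref = 0;
//   std::unique_ptr<HistogramBase> tentative = allocator->AllocateHistogram(
//       LINEAR_HISTOGRAM, "Illustrative.Name", 1, 100, ranges, flags, &ref);
//   HistogramBase* raw = tentative.release();
//   HistogramBase* registered =
//       StatisticsRecorder::RegisterOrDeleteDuplicate(raw);
//   // |registered| != |raw| means this thread lost a creation race.
//   allocator->FinalizeHistogram(ref, /*registered=*/registered == raw);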

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  subtle::NoBarrier_Store(&last_created_, 0);
}

// static
HistogramBase*
PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
  // A value that can be stored in an AtomicWord as a flag. It must not be zero
  // or a valid address.
  constexpr subtle::AtomicWord kHistogramUnderConstruction = 1;

  // This is similar to LazyInstance but with return-if-under-construction
  // rather than yielding the CPU until construction completes. This is
  // necessary because the FactoryGet() below creates a histogram and thus
  // recursively calls this method to try to store the result.

  // Get the existing pointer. If the "under construction" flag is present,
  // abort now. It's okay to return null from this method.
  static subtle::AtomicWord atomic_histogram_pointer = 0;
  subtle::AtomicWord histogram_value =
      subtle::Acquire_Load(&atomic_histogram_pointer);
  if (histogram_value == kHistogramUnderConstruction)
    return nullptr;

  // If a valid histogram pointer already exists, return it.
  if (histogram_value)
    return reinterpret_cast<HistogramBase*>(histogram_value);

  // Set the "under construction" flag; abort if something has changed.
  if (subtle::NoBarrier_CompareAndSwap(&atomic_histogram_pointer, 0,
                                       kHistogramUnderConstruction) != 0) {
    return nullptr;
  }

  // Only one thread can be here. Even recursion will be thwarted above.

  if (GlobalHistogramAllocator::Get()) {
    DVLOG(1) << "Creating the results-histogram inside persistent"
             << " memory can cause future allocations to crash if"
             << " that memory is ever released (for testing).";
  }

  HistogramBase* histogram_pointer = LinearHistogram::FactoryGet(
      kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
      HistogramBase::kUmaTargetedHistogramFlag);
  subtle::Release_Store(
      &atomic_histogram_pointer,
      reinterpret_cast<subtle::AtomicWord>(histogram_pointer));

  return histogram_pointer;
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
    NOTREACHED();
    return nullptr;
  }

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation, meaning that though its size and storage for a
  // reference are defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                          &histogram_data_ptr->counts_ref,
                                          kTypeIdCountsArray, counts_bytes, 0);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
      /*make_iterable=*/false);
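
  // Layout sketch for the shared block above: with N buckets, counts_bytes
  // is 2 * N * sizeof(HistogramBase::AtomicCount). The live counts occupy
  // the first half (N atomic counts) and the "logged" counts used for delta
  // snapshots occupy the second half, which is why |logged_data| starts at
  // offset counts_bytes / 2.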

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      NOTREACHED();
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
  } else {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}
742
bcwhite09dac602016-07-13 01:41:16 +0900743// static
744void PersistentHistogramAllocator::RecordCreateHistogramResult(
745 CreateHistogramResultType result) {
746 HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
747 if (result_histogram)
748 result_histogram->Add(result);
bcwhite248583d2016-03-16 11:37:45 +0900749}
750
Chris Watkinsd155d9f2017-11-29 16:16:38 +0900751GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(
      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
          base, size, page_size, id, name, false))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
}

#if !defined(OS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(
    const FilePath& file_path,
    size_t size,
    uint64_t id,
    StringPiece name) {
  bool exists = PathExists(file_path);
  File file(
      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
                     File::FLAG_READ | File::FLAG_WRITE);

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  if (exists) {
    size = saturated_cast<size_t>(file.GetLength());
    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  } else {
    mmfile->Initialize(std::move(file), {0, size},
                       MemoryMappedFile::READ_WRITE_EXTEND);
  }
  if (!mmfile->IsValid() ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    NOTREACHED() << file_path;
    return false;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
                                                      id, name, false))));
  Get()->SetPersistentLocation(file_path);
  return true;
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    StringPiece name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path, /*recursive=*/false);
  DCHECK(!base::PathExists(active_path));

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty()) {
    base::ReplaceFile(spare_path, active_path, nullptr);
    DCHECK(!base::PathExists(spare_path));
  }

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
                                                         size_t size,
                                                         uint64_t id,
                                                         StringPiece name) {
  FilePath base_path, active_path, spare_path;
  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     StringPiece name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    StringPiece name,
    base::Time stamp,
    ProcessId pid) {
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}

// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<base::StringPiece> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = parts[0].as_string();

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}
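
// A round-trip sketch of the naming scheme above, with made-up hex values:
// for name "Illustrative", time stamp 0x5B7A36C2, and PID 0x1F3B, the
// constructed basename is "Illustrative-5B7A36C2-1F3B" plus the allocator's
// file extension; ParseFilePath() then splits on "-" and "." to recover the
// name, the hexadecimal time_t stamp, and the hexadecimal process id.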

// static
void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
                                                  StringPiece name,
                                                  FilePath* out_base_path,
                                                  FilePath* out_active_path,
                                                  FilePath* out_spare_path) {
  if (out_base_path)
    *out_base_path = ConstructFilePath(dir, name);

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(dir, name.as_string().append("-active"));
  }

  if (out_spare_path) {
    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
  }
}

// static
void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
    const FilePath& active_dir,
    const FilePath& upload_dir,
    const std::string& name,
    FilePath* out_upload_path,
    FilePath* out_active_path,
    FilePath* out_spare_path) {
  if (out_upload_path) {
    *out_upload_path = ConstructFilePathForUploadDir(
        upload_dir, name, Time::Now(), GetCurrentProcId());
  }

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(active_dir, name + std::string("-active"));
  }

  if (out_spare_path) {
    *out_spare_path =
        ConstructFilePath(active_dir, name + std::string("-spare"));
  }
}

// static
bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success = true;
  {
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    if (!spare_file.IsValid())
      return false;

    MemoryMappedFile mmfile;
    mmfile.Initialize(std::move(spare_file), {0, size},
                      MemoryMappedFile::READ_WRITE_EXTEND);
    success = mmfile.IsValid();
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path, /*recursive=*/false);

  return success;
}

// static
bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir,
                                                    size_t size,
                                                    StringPiece name) {
  FilePath spare_path;
  ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path);
  return CreateSpareFile(spare_path, size);
}
#endif  // !defined(OS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
    const SharedMemoryHandle& handle,
    size_t size) {
  std::unique_ptr<SharedMemory> shm(
      new SharedMemory(handle, /*readonly=*/false));
  if (!shm->Map(size) ||
      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
    NOTREACHED();
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<SharedPersistentMemoryAllocator>(
          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
}

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<uintptr_t>(allocator.release()));
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}
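
// A minimal startup sketch for wiring up the global allocator (size, id,
// and name are illustrative only):
//
//   GlobalHistogramAllocator::CreateWithLocalMemory(
//       1 << 20, /*id=*/0x1234, "IllustrativeMetrics");
//   GlobalHistogramAllocator* global = GlobalHistogramAllocator::Get();
//   // Histograms created from here on are backed by |global|'s memory.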

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);

    // If a test breaks here then a memory region containing a histogram
    // actively used by this code is being released back to the test.
    // If that memory segment were to be deleted, future calls to create
    // persistent histograms would crash. To avoid this, have the test call
    // the method GetCreateHistogramResultHistogram() *before* setting
    // the (temporary) memory allocator via SetGlobalAllocator() so that
    // histogram is instead allocated from the process heap.
    DCHECK_NE(kResultHistogram, data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  return WrapUnique(histogram_allocator);
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if defined(OS_NACL)
  NOTREACHED();
#else
  if (persistent_location_.empty())
    return;

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and be ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base