// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <memory>

#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"

namespace base {

namespace {

// Name of histogram for storing results of local operations.
const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
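
// Illustrative note (restating the rule above, not an additional guarantee):
// each identifier is the first 32 bits of SHA1 of the type's name plus a
// small version offset, so an incompatible future change to the ranges-array
// layout would, for example, be tagged 0xBCEA225A + 2 and older stored
// records would simply be skipped during extraction.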

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first. An AtomicWord is used instead
// of std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
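
// Worked example for the calculation above (assuming the usual 4-byte
// HistogramBase::AtomicCount): a histogram with 50 buckets needs
// 50 * 2 * 4 = 400 bytes of "counts" storage -- one live sample count plus
// one logged count per bucket.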

}  // namespace

const Feature kPersistentHistogramsFeature{
    "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};


PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  lock_.AssertAcquired();

  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}


PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return false;
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}
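
// Illustrative lifecycle of PersistentSampleMapRecords, as suggested by the
// methods above (a sketch, not a contract): a sparse-histogram sample map
// obtains the object via UseSampleMapRecords()/Acquire() with itself as
// |user|, repeatedly calls GetNext() to pick up records matching its
// |sample_map_id_| -- creating a new record via CreateNew() when a value has
// no record yet -- and calls Release() when its pass is complete. The
// Acquire()/Release() pairing only asserts that a single user owns the
// object at a time.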


// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};
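
// Note on sizing (derived from AllocateHistogram() below): each record is
// allocated as offsetof(PersistentHistogramData, name) plus the histogram
// name length plus one byte for the terminating NUL, so |name| simply
// occupies whatever space remains at the end of the allocated block.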

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}
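
// A minimal usage sketch for the iterator above (illustrative only; callers
// in this codebase normally go through the StatisticsRecorder import path):
//
//   PersistentHistogramAllocator::Iterator hist_iter(allocator);
//   std::unique_ptr<HistogramBase> h;
//   while ((h = hist_iter.GetNextWithIgnore(0)) != nullptr) {
//     ...  // e.g. merge or register the recreated histogram.
//   }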


PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
    NOTREACHED();
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // that failed due to a corrupt allocator and the number of process
  // instances with one, the latter being indicated by "newly corrupt" below.
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
    return nullptr;
  }

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.length() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      NOTREACHED();
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          NOTREACHED();
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }


    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  CreateHistogramResultType result;
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
  } else if (memory_allocator_->IsFull()) {
    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
  } else {
    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
  }
  RecordCreateHistogramResult(result);

  // Crash for failures caused by internal bugs but not "full" which is
  // dependent on outside code.
  if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
    NOTREACHED() << memory_allocator_->Name() << ", error=" << result;

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    NOTREACHED();
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  subtle::NoBarrier_Store(&last_created_, 0);
}

// static
HistogramBase*
PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
  // A value that can be stored in an AtomicWord as a flag. It must not be zero
  // or a valid address.
  constexpr subtle::AtomicWord kHistogramUnderConstruction = 1;

  // This is similar to LazyInstance but with return-if-under-construction
  // rather than yielding the CPU until construction completes. This is
  // necessary because the FactoryGet() below creates a histogram and thus
  // recursively calls this method to try to store the result.

  // Get the existing pointer. If the "under construction" flag is present,
  // abort now. It's okay to return null from this method.
  static subtle::AtomicWord atomic_histogram_pointer = 0;
  subtle::AtomicWord histogram_value =
      subtle::Acquire_Load(&atomic_histogram_pointer);
  if (histogram_value == kHistogramUnderConstruction)
    return nullptr;

  // If a valid histogram pointer already exists, return it.
  if (histogram_value)
    return reinterpret_cast<HistogramBase*>(histogram_value);

  // Set the "under construction" flag; abort if something has changed.
  if (subtle::NoBarrier_CompareAndSwap(&atomic_histogram_pointer, 0,
                                       kHistogramUnderConstruction) != 0) {
    return nullptr;
  }

  // Only one thread can be here. Even recursion will be thwarted above.

  if (GlobalHistogramAllocator::Get()) {
    DVLOG(1) << "Creating the results-histogram inside persistent"
             << " memory can cause future allocations to crash if"
             << " that memory is ever released (for testing).";
  }

  HistogramBase* histogram_pointer = LinearHistogram::FactoryGet(
      kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
      HistogramBase::kUmaTargetedHistogramFlag);
  subtle::Release_Store(
      &atomic_histogram_pointer,
      reinterpret_cast<subtle::AtomicWord>(histogram_pointer));

  return histogram_pointer;
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
    NOTREACHED();
    return nullptr;
  }

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation meaning that though its size and storage for a
  // reference is defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                          &histogram_data_ptr->counts_ref,
                                          kTypeIdCountsArray, counts_bytes, 0);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
      /*make_iterable=*/false);

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      NOTREACHED();
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
  } else {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

// static
void PersistentHistogramAllocator::RecordCreateHistogramResult(
    CreateHistogramResultType result) {
  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
  if (result_histogram)
    result_histogram->Add(result);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(
      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
          base, size, page_size, id, name, false))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
}

#if !defined(OS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(
    const FilePath& file_path,
    size_t size,
    uint64_t id,
    StringPiece name) {
  bool exists = PathExists(file_path);
  File file(
      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
                     File::FLAG_READ | File::FLAG_WRITE);

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  if (exists) {
    size = saturated_cast<size_t>(file.GetLength());
    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  } else {
    mmfile->Initialize(std::move(file), {0, size},
                       MemoryMappedFile::READ_WRITE_EXTEND);
  }
  if (!mmfile->IsValid() ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    NOTREACHED() << file_path;
    return false;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
                                                      id, name, false))));
  Get()->SetPersistentLocation(file_path);
  return true;
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    StringPiece name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path, /*recursive=*/false);
  DCHECK(!base::PathExists(active_path));

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty()) {
    base::ReplaceFile(spare_path, active_path, nullptr);
    DCHECK(!base::PathExists(spare_path));
  }

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
                                                         size_t size,
                                                         uint64_t id,
                                                         StringPiece name) {
  FilePath base_path, active_path, spare_path;
  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     StringPiece name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    StringPiece name,
    base::Time stamp,
    ProcessId pid) {
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}
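
// Illustrative result of the path construction above: for a name of
// "mymetrics" and pid 0x1234, the produced file name has the form
// "mymetrics-<hex time_t stamp>-1234" plus the allocator's file extension
// (".pma" at the time of writing). ParseFilePath() below reverses this
// encoding by splitting on '-' and '.'.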

// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<base::StringPiece> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = parts[0].as_string();

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}

// static
void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
                                                  StringPiece name,
                                                  FilePath* out_base_path,
                                                  FilePath* out_active_path,
                                                  FilePath* out_spare_path) {
  if (out_base_path)
    *out_base_path = ConstructFilePath(dir, name);

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(dir, name.as_string().append("-active"));
  }

  if (out_spare_path) {
    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
  }
}

// static
void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
    const FilePath& active_dir,
    const FilePath& upload_dir,
    const std::string& name,
    FilePath* out_upload_path,
    FilePath* out_active_path,
    FilePath* out_spare_path) {
  if (out_upload_path) {
    *out_upload_path = ConstructFilePathForUploadDir(
        upload_dir, name, Time::Now(), GetCurrentProcId());
  }

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(active_dir, name + std::string("-active"));
  }

  if (out_spare_path) {
    *out_spare_path =
        ConstructFilePath(active_dir, name + std::string("-spare"));
  }
}

// static
bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success = true;
  {
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    if (!spare_file.IsValid())
      return false;

    MemoryMappedFile mmfile;
    mmfile.Initialize(std::move(spare_file), {0, size},
                      MemoryMappedFile::READ_WRITE_EXTEND);
    success = mmfile.IsValid();
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path, /*recursive=*/false);

  return success;
}
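
// Design note on CreateSpareFile() above: the spare is built under a ".tmp"
// name and only moved onto |spare_path| with ReplaceFile() after the
// memory-mapped initialization succeeded, so other code never observes a
// partially created spare file at its final location.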
| 958 | |
| 959 | // static |
| 960 | bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir, |
| 961 | size_t size, |
| 962 | StringPiece name) { |
| 963 | FilePath spare_path; |
| 964 | ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path); |
| 965 | return CreateSpareFile(spare_path, size); |
scottmg | b131654 | 2016-09-17 04:39:10 +0900 | [diff] [blame] | 966 | } |
| 967 | #endif // !defined(OS_NACL) |

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
    const SharedMemoryHandle& handle,
    size_t size) {
  std::unique_ptr<SharedMemory> shm(
      new SharedMemory(handle, /*readonly=*/false));
  if (!shm->Map(size) ||
      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
    NOTREACHED();
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<SharedPersistentMemoryAllocator>(
          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
}
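
// A minimal sketch (hypothetical child-process startup code, not part of this
// file): a subprocess that receives a shared-memory handle and its size from
// the browser, e.g. over the command line or IPC, could route its histograms
// into that segment with:
//
//   GlobalHistogramAllocator::CreateWithSharedMemoryHandle(handle, size);
//   // From here on, newly created histograms are allocated in the shared
//   // segment and can be read by the browser process.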

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<uintptr_t>(allocator.release()));
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}
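
// A minimal sketch (hypothetical startup code, not part of this file): Set()
// is normally reached through one of the Create*() helpers, such as
// CreateWithSharedMemoryHandle() above, but embedder code that builds its own
// allocator could install it once, early in process startup:
//
//   std::unique_ptr<GlobalHistogramAllocator> allocator = /* built earlier */;
//   GlobalHistogramAllocator::Set(std::move(allocator));
//   // Any histogram created after this point is stored in that allocator.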

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the
  // StatisticsRecorder forget about the histograms contained therein;
  // otherwise, some operations will try to access them and the released
  // memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);

    // If a test breaks here then a memory region containing a histogram
    // actively used by this code is being released back to the test.
    // If that memory segment were to be deleted, future calls to create
    // persistent histograms would crash. To avoid this, have the test call
    // the method GetCreateHistogramResultHistogram() *before* setting
    // the (temporary) memory allocator via Set() so that histogram is
    // instead allocated from the process heap.
    DCHECK_NE(kResultHistogram, data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  return WrapUnique(histogram_allocator);
}
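
// A minimal test usage sketch (hypothetical unit test, not part of this
// file): a test that installs a temporary local allocator can detach it again
// so later tests start from a clean state. The size, id, and name are
// illustrative only.
//
//   GlobalHistogramAllocator::CreateWithLocalMemory(64 << 10, 0, "Test");
//   // ... create and exercise histograms ...
//   std::unique_ptr<GlobalHistogramAllocator> released =
//       GlobalHistogramAllocator::ReleaseForTesting();
//   // |released| keeps the backing memory alive until the test is done with
//   // any histograms that were created in it.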

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}
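
// A minimal usage sketch (hypothetical shutdown code, not part of this file):
// an embedder that wants the in-memory histogram data flushed to disk, e.g.
// at clean shutdown, could pair the two calls like this. The file name is
// illustrative only.
//
//   GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
//   if (allocator) {
//     allocator->SetPersistentLocation(
//         user_data_dir.AppendASCII("BrowserMetrics-active.pma"));
//     allocator->WriteToPersistentLocation();
//   }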

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(
      PersistentMemoryAllocator::MEMORY_DELETED);

#if defined(OS_NACL)
  NOTREACHED();
#else
  if (persistent_location_.empty())
    return;

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" reference to be overwritten
  // before it is recognized here, the histogram will be created anyway and
  // then ignored when the statistics-recorder detects it as a duplicate.
  // This simple check reduces the time of creating persistent histograms
  // by about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder has
  // its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}
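
// A minimal sketch (hypothetical caller, not part of this file): code that
// wants the StatisticsRecorder to pick up histograms written into the same
// persistent memory by another process could periodically do:
//
//   GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
//   if (allocator)
//     allocator->ImportHistogramsToStatisticsRecorder();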

}  // namespace base