Update V8 to version 4.1.0.21
This is a cherry-pick of all commits up to and including the
4.1.0.21 cherry-pick in Chromium.
Original commit message:
Version 4.1.0.21 (cherry-pick)
Merged 206e9136bde0f2b5ae8cb77afbb1e7833e5bd412
Unlink pages from the space page list after evacuation.
BUG=430201
LOG=N
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/953813002
Cr-Commit-Position: refs/branch-heads/4.1@{#22}
Cr-Branched-From: 2e08d2a7aa9d65d269d8c57aba82eb38a8cb0a18-refs/heads/candidates@{#25353}
---
FPIIM-449
Change-Id: I8c23c7bbb70772b4858fe8a47b64fa97ee0d1f8c
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
new file mode 100644
index 0000000..179988d
--- /dev/null
+++ b/src/compiler/zone-pool.cc
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/zone-pool.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)  // Begin measuring zone allocation deltas for |zone_pool|.
+ : zone_pool_(zone_pool),
+ total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
+ max_allocated_bytes_(0) {
+ zone_pool_->stats_.push_back(this);  // Register so the pool notifies us via ZoneReturned().
+ for (auto zone : zone_pool_->used_) {
+ size_t size = static_cast<size_t>(zone->allocation_size());
+ std::pair<InitialValues::iterator, bool> res =
+ initial_values_.insert(std::make_pair(zone, size));  // Snapshot baseline so queries report only growth since scope start.
+ USE(res);
+ DCHECK(res.second);  // Each zone appears at most once in used_.
+ }
+}
+
+
+ZonePool::StatsScope::~StatsScope() {
+ DCHECK_EQ(zone_pool_->stats_.back(), this);  // Scopes must nest (LIFO): we must be the innermost live scope.
+ zone_pool_->stats_.pop_back();  // Deregister from the pool's notification list.
+}
+
+
+size_t ZonePool::StatsScope::GetMaxAllocatedBytes() {  // Peak live bytes observed since this scope started.
+ return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());  // max_allocated_bytes_ only refreshes on zone return, so fold in the live total.
+}
+
+
+size_t ZonePool::StatsScope::GetCurrentAllocatedBytes() {  // Bytes currently allocated in used zones, net of pre-scope baselines.
+ size_t total = 0;
+ for (Zone* zone : zone_pool_->used_) {
+ total += static_cast<size_t>(zone->allocation_size());
+ // Adjust for initial values.
+ InitialValues::iterator it = initial_values_.find(zone);
+ if (it != initial_values_.end()) {
+ total -= it->second;  // Subtract what the zone already held when the scope began.
+ }
+ }
+ return total;
+}
+
+
+size_t ZonePool::StatsScope::GetTotalAllocatedBytes() {  // Cumulative bytes (including already-returned zones) since scope start.
+ return zone_pool_->GetTotalAllocatedBytes() - total_allocated_bytes_at_start_;
+}
+
+
+void ZonePool::StatsScope::ZoneReturned(Zone* zone) {  // Called by ZonePool::ReturnZone before |zone| leaves used_.
+ size_t current_total = GetCurrentAllocatedBytes();
+ // Update max.
+ max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);  // Record the peak while the zone's bytes still count.
+ // Drop zone from initial value map.
+ InitialValues::iterator it = initial_values_.find(zone);
+ if (it != initial_values_.end()) {
+ initial_values_.erase(it);  // Its baseline no longer applies once the zone is recycled.
+ }
+}
+
+
+ZonePool::ZonePool(Isolate* isolate)  // Pool starts empty; zones are created lazily in NewEmptyZone().
+ : isolate_(isolate), max_allocated_bytes_(0), total_deleted_bytes_(0) {}
+
+
+ZonePool::~ZonePool() {
+ DCHECK(used_.empty());  // All handed-out zones must have been returned...
+ DCHECK(stats_.empty());  // ...and all stats scopes destroyed before the pool dies.
+ for (Zone* zone : unused_) {
+ delete zone;  // Only the cached (unused) zones remain to free.
+ }
+}
+
+
+size_t ZonePool::GetMaxAllocatedBytes() {  // Peak live bytes over the pool's lifetime.
+ return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());  // max_allocated_bytes_ only refreshes in ReturnZone, so fold in the live total.
+}
+
+
+size_t ZonePool::GetCurrentAllocatedBytes() {  // Sum of allocation sizes over all zones currently handed out.
+ size_t total = 0;
+ for (Zone* zone : used_) {
+ total += static_cast<size_t>(zone->allocation_size());
+ }
+ return total;
+}
+
+
+size_t ZonePool::GetTotalAllocatedBytes() {  // Lifetime total: bytes already returned via ReturnZone plus bytes still live.
+ return total_deleted_bytes_ + GetCurrentAllocatedBytes();
+}
+
+
+Zone* ZonePool::NewEmptyZone() {  // Hand out an empty zone, recycling a cached one when available.
+ Zone* zone;
+ // Grab a zone from pool if possible.
+ if (!unused_.empty()) {
+ zone = unused_.back();
+ unused_.pop_back();
+ } else {
+ zone = new Zone(isolate_);  // Cache miss: allocate a fresh zone.
+ }
+ used_.push_back(zone);
+ DCHECK_EQ(0, zone->allocation_size());  // Recycled zones were cleared in ReturnZone, so this holds either way.
+ return zone;
+}
+
+
+void ZonePool::ReturnZone(Zone* zone) {  // Take back |zone|: update stats/accounting, then recycle or delete it.
+ size_t current_total = GetCurrentAllocatedBytes();
+ // Update max.
+ max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);  // Record peak while |zone|'s bytes still count.
+ // Update stats.
+ for (auto stat_scope : stats_) {
+ stat_scope->ZoneReturned(zone);  // Let each active scope record its max before the zone vanishes.
+ }
+ // Remove from used.
+ Used::iterator it = std::find(used_.begin(), used_.end(), zone);
+ DCHECK(it != used_.end());  // Only zones obtained from NewEmptyZone may be returned.
+ used_.erase(it);
+ total_deleted_bytes_ += static_cast<size_t>(zone->allocation_size());  // Preserve lifetime accounting before the bytes are released.
+ // Delete zone or clear and stash on unused_.
+ if (unused_.size() >= kMaxUnusedSize) {
+ delete zone;  // Cache is full; free outright.
+ } else {
+ zone->DeleteAll();  // Reset so the next NewEmptyZone gets an empty zone.
+ DCHECK_EQ(0, zone->allocation_size());
+ unused_.push_back(zone);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8