Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1 | // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #include "src/heap/memory-reducer.h" |
| 6 | |
| 7 | #include "src/flags.h" |
| 8 | #include "src/heap/gc-tracer.h" |
| 9 | #include "src/heap/heap-inl.h" |
| 10 | #include "src/utils.h" |
| 11 | #include "src/v8.h" |
| 12 | |
| 13 | namespace v8 { |
| 14 | namespace internal { |
| 15 | |
// Delay (ms) before the next GC check while the mutator is still active.
const int MemoryReducer::kLongDelayMs = 8000;
// Delay (ms) between consecutive GCs of one memory-reducing sequence.
const int MemoryReducer::kShortDelayMs = 500;
// If this much time (ms) passed since the last GC, force one via the
// watchdog path even without evidence of idleness (see WatchdogGC).
const int MemoryReducer::kWatchdogDelayMs = 100000;
// Upper bound on the number of GCs started by one reducer activation.
const int MemoryReducer::kMaxNumberOfGCs = 3;
| 20 | |
// Registers the task with the isolate's cancelable-task machinery (so it can
// be canceled on isolate teardown) and remembers the owning MemoryReducer.
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}
| 24 | |
| 25 | |
// Runs on the foreground thread when the scheduled timer fires. Samples the
// allocation and JS-call rates to decide whether the mutator looks idle,
// packages the measurements into a kTimer event, and feeds it to NotifyTimer.
void MemoryReducer::TimerTask::RunInternal() {
  // Below this many JS API calls per ms the mutator is considered idle.
  const double kJsCallsPerMsThreshold = 0.5;
  Heap* heap = memory_reducer_->heap();
  Event event;
  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
  // Feed fresh allocation counters to the GC tracer so that
  // HasLowAllocationRate() below is based on up-to-date data.
  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
                                   heap->OldGenerationAllocationCounter());
  double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
  bool low_allocation_rate = heap->HasLowAllocationRate();
  bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
  if (FLAG_trace_gc_verbose) {
    PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
                 js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
                 optimize_for_memory ? "background" : "foreground");
  }
  event.type = kTimer;
  event.time_ms = time_ms;
  // The memory reducer will start incremental marking if
  // 1) mutator is likely idle: js call rate is low and allocation rate is low.
  // 2) mutator is in background: optimize for memory flag is set.
  event.should_start_incremental_gc = is_idle || optimize_for_memory;
  // Marking can only be started if it is currently stopped; when optimizing
  // for memory the usual CanBeActivated() gate is bypassed.
  event.can_start_incremental_gc =
      heap->incremental_marking()->IsStopped() &&
      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
  memory_reducer_->NotifyTimer(event);
}
| 53 | |
| 54 | |
| 55 | double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) { |
| 56 | unsigned int counter = heap()->isolate()->js_calls_from_api_counter(); |
| 57 | unsigned int call_delta = counter - js_calls_counter_; |
| 58 | double time_delta_ms = time_ms - js_calls_sample_time_ms_; |
| 59 | js_calls_counter_ = counter; |
| 60 | js_calls_sample_time_ms_ = time_ms; |
| 61 | return time_delta_ms > 0 ? call_delta / time_delta_ms : 0; |
| 62 | } |
| 63 | |
| 64 | |
// Handles a kTimer event from TimerTask::RunInternal: advances the state
// machine and acts on the resulting state. In kRun it starts an idle
// incremental marking cycle; in kWait it optionally advances any pending
// marking (important for background tabs) and re-arms the timer.
void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  // Timers are only scheduled on entry to / while in the WAIT state (see the
  // ScheduleTimer call sites), so a firing timer implies kWait.
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    // Step() only returns kRun when event.can_start_incremental_gc was set,
    // which requires marking to be stopped.
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
                   state_.started_gcs);
    }
    heap()->StartIdleIncrementalMarking();
  } else if (state_.action == kWait) {
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
      heap()->incremental_marking()->AdvanceIncrementalMarking(
          deadline, i::IncrementalMarking::StepActions(
                        i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                        i::IncrementalMarking::FORCE_MARKING,
                        i::IncrementalMarking::FORCE_COMPLETION));
      heap()->FinalizeIncrementalMarkingIfComplete(
          "Memory reducer: finalize incremental marking");
    }
    // Re-schedule the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
                   state_.next_gc_start_ms - event.time_ms);
    }
  }
}
| 102 | |
| 103 | |
| 104 | void MemoryReducer::NotifyMarkCompact(const Event& event) { |
| 105 | DCHECK_EQ(kMarkCompact, event.type); |
| 106 | Action old_action = state_.action; |
| 107 | state_ = Step(state_, event); |
| 108 | if (old_action != kWait && state_.action == kWait) { |
| 109 | // If we are transitioning to the WAIT state, start the timer. |
| 110 | ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms); |
| 111 | } |
| 112 | if (old_action == kRun) { |
| 113 | if (FLAG_trace_gc_verbose) { |
| 114 | PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n", |
| 115 | state_.started_gcs, |
| 116 | state_.action == kWait ? "will do more" : "done"); |
| 117 | } |
| 118 | } |
| 119 | } |
| 120 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 121 | void MemoryReducer::NotifyPossibleGarbage(const Event& event) { |
| 122 | DCHECK_EQ(kPossibleGarbage, event.type); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 123 | Action old_action = state_.action; |
| 124 | state_ = Step(state_, event); |
| 125 | if (old_action != kWait && state_.action == kWait) { |
| 126 | // If we are transitioning to the WAIT state, start the timer. |
| 127 | ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms); |
| 128 | } |
| 129 | } |
| 130 | |
| 131 | |
| 132 | bool MemoryReducer::WatchdogGC(const State& state, const Event& event) { |
| 133 | return state.last_gc_time_ms != 0 && |
| 134 | event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs; |
| 135 | } |
| 136 | |
| 137 | |
| 138 | // For specification of this function see the comment for MemoryReducer class. |
// For specification of this function see the comment for MemoryReducer class.
// Pure transition function of the reducer's state machine: maps the current
// state and an incoming event to the next state, with no side effects.
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
  // With incremental marking or the memory reducer disabled, collapse to
  // DONE (keeping the last-GC timestamp).
  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
    return State(kDone, 0, 0, state.last_gc_time_ms);
  }
  switch (state.action) {
    case kDone:
      if (event.type == kTimer) {
        // Stale timer from an earlier WAIT phase; ignore it.
        return state;
      } else {
        DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
        // Activity observed: start waiting, with the earliest possible GC
        // kLongDelayMs from now. A mark-compact also refreshes the last-GC
        // timestamp.
        return State(
            kWait, 0, event.time_ms + kLongDelayMs,
            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
      }
    case kWait:
      switch (event.type) {
        case kPossibleGarbage:
          // Already waiting; nothing to update.
          return state;
        case kTimer:
          if (state.started_gcs >= kMaxNumberOfGCs) {
            // Budget of reducer-started GCs exhausted; stop the sequence.
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
          } else if (event.can_start_incremental_gc &&
                     (event.should_start_incremental_gc ||
                      WatchdogGC(state, event))) {
            // Idle (or watchdog overdue) and marking can start: begin a GC
            // once the scheduled start time has been reached.
            if (state.next_gc_start_ms <= event.time_ms) {
              return State(kRun, state.started_gcs + 1, 0.0,
                           state.last_gc_time_ms);
            } else {
              return state;
            }
          } else {
            // Mutator is busy or marking cannot start: push the next
            // possible GC start out by another long delay.
            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                         state.last_gc_time_ms);
          }
        case kMarkCompact:
          // A GC happened for other reasons; keep waiting with a fresh long
          // delay and record the GC time.
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                       event.time_ms);
      }
    case kRun:
      if (event.type != kMarkCompact) {
        // Only the completion of the started GC moves us out of kRun.
        return state;
      } else {
        // The reducer-started GC finished. Schedule another one after a
        // short delay if it is likely to free more memory (and always after
        // the first GC of a sequence); otherwise the sequence is done.
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
                       event.time_ms);
        } else {
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
        }
      }
  }
  UNREACHABLE();
  return State(kDone, 0, 0, 0.0);  // Make the compiler happy.
}
| 194 | |
| 195 | |
// Schedules a TimerTask on the foreground thread to fire after delay_ms
// (plus slack). Also refreshes the JS-call sample so the rate measured when
// the timer fires covers exactly the waiting period.
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
  DCHECK(delay_ms > 0);
  // Record the time and the js call counter.
  SampleAndGetJsCallsPerMs(time_ms);
  // Leave some room for precision error in task scheduler.
  const double kSlackMs = 100;
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
  // NOTE(review): ownership of the task appears to pass to the platform,
  // which should delete it after running -- confirm against the v8::Platform
  // contract.
  auto timer_task = new MemoryReducer::TimerTask(this);
  // CallDelayedOnForegroundThread takes the delay in seconds.
  V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
      isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}
| 207 | |
| 208 | |
// Resets the state machine to DONE with all counters and timestamps cleared.
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
| 210 | |
| 211 | } // namespace internal |
| 212 | } // namespace v8 |