// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-scheduler.h"

#include "src/base/adapters.h"
#include "src/base/utils/random-number-generator.h"

namespace v8 {
namespace internal {
namespace compiler {

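// The scheduler works on one basic block at a time: AddInstruction builds a
// dependency graph in which an edge from A to B means that B must not be
// scheduled before A, and ScheduleBlock then performs list scheduling over
// that graph, emitting one ready instruction per simulated cycle.
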
void InstructionScheduler::SchedulingQueueBase::AddNode(
    ScheduleGraphNode* node) {
  // We keep the ready list sorted by total latency so that we can quickly find
  // the next best candidate to schedule.
  auto it = nodes_.begin();
  while ((it != nodes_.end()) &&
         ((*it)->total_latency() >= node->total_latency())) {
    ++it;
  }
  nodes_.insert(it, node);
}


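// The ready list is kept sorted by decreasing total latency (see AddNode), so
// the first node whose start cycle has been reached sits on the longest
// remaining dependency chain: that node is the critical-path candidate.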
InstructionScheduler::ScheduleGraphNode*
InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
  DCHECK(!IsEmpty());
  auto candidate = nodes_.end();
  for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) {
    // We only consider instructions that have all their operands ready.
    if (cycle >= (*iterator)->start_cycle()) {
      candidate = iterator;
      break;
    }
  }

  if (candidate != nodes_.end()) {
    ScheduleGraphNode* result = *candidate;
    nodes_.erase(candidate);
    return result;
  }

  return nullptr;
}


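// The stress variant ignores latencies and start cycles and picks a random
// ready node instead. Correctness is preserved because only nodes whose
// predecessors have all been scheduled ever enter the ready list; picking
// randomly explores many legal orderings (used for stress testing via
// FLAG_turbo_stress_instruction_scheduling, see EndBlock).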
InstructionScheduler::ScheduleGraphNode*
InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) {
  DCHECK(!IsEmpty());
  // Choose a random element from the ready list.
  auto candidate = nodes_.begin();
  std::advance(candidate, isolate()->random_number_generator()->NextInt(
      static_cast<int>(nodes_.size())));
  ScheduleGraphNode* result = *candidate;
  nodes_.erase(candidate);
  return result;
}


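// total_latency_ and start_cycle_ start out as -1; the former is filled in by
// ComputeTotalLatencies before scheduling and the latter is raised by
// ScheduleBlock as the node's predecessors are emitted.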
InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
    Zone* zone,
    Instruction* instr)
    : instr_(instr),
      successors_(zone),
      unscheduled_predecessors_count_(0),
      latency_(GetInstructionLatency(instr)),
      total_latency_(-1),
      start_cycle_(-1) {
}


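// Records a scheduling edge: |node| must not be scheduled before this node.
// The unscheduled-predecessor count acts as a readiness counter; it is
// decremented through DropUnscheduledPredecessor when a predecessor is
// emitted, and the node becomes ready once it reaches zero.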
void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
    ScheduleGraphNode* node) {
  successors_.push_back(node);
  node->unscheduled_predecessors_count_++;
}


InstructionScheduler::InstructionScheduler(Zone* zone,
                                           InstructionSequence* sequence)
    : zone_(zone),
      sequence_(sequence),
      graph_(zone),
      last_side_effect_instr_(nullptr),
      pending_loads_(zone),
      last_live_in_reg_marker_(nullptr),
      last_deopt_(nullptr),
      operands_map_(zone) {}


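// The DCHECKs below verify that EndBlock left no per-block state behind:
// scheduling never crosses basic block boundaries.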
void InstructionScheduler::StartBlock(RpoNumber rpo) {
  DCHECK(graph_.empty());
  DCHECK(last_side_effect_instr_ == nullptr);
  DCHECK(pending_loads_.empty());
  DCHECK(last_live_in_reg_marker_ == nullptr);
  DCHECK(last_deopt_ == nullptr);
  DCHECK(operands_map_.empty());
  sequence()->StartBlock(rpo);
}


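// Schedules the instructions accumulated since StartBlock, then resets all
// per-block state. The queue type is chosen statically: the stress queue
// under FLAG_turbo_stress_instruction_scheduling, the critical-path queue
// otherwise.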
void InstructionScheduler::EndBlock(RpoNumber rpo) {
  if (FLAG_turbo_stress_instruction_scheduling) {
    ScheduleBlock<StressSchedulerQueue>();
  } else {
    ScheduleBlock<CriticalPathFirstQueue>();
  }
  sequence()->EndBlock(rpo);
  graph_.clear();
  last_side_effect_instr_ = nullptr;
  pending_loads_.clear();
  last_live_in_reg_marker_ = nullptr;
  last_deopt_ = nullptr;
  operands_map_.clear();
}


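// Adds |instr| to the dependency graph. Edges are added so that:
//  - block terminators stay last (they become a successor of every node);
//  - fixed register parameters stay chained at the block start;
//  - instructions that depend on a deoptimization point stay after it;
//  - side-effecting instructions and memory operations keep their relative
//    order;
//  - a use of a virtual register stays after its definition.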
void InstructionScheduler::AddInstruction(Instruction* instr) {
  ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);

  if (IsBlockTerminator(instr)) {
    // Make sure that basic block terminators are not moved by adding them
    // as a successor of every other instruction.
    for (ScheduleGraphNode* node : graph_) {
      node->AddSuccessor(new_node);
    }
  } else if (IsFixedRegisterParameter(instr)) {
    if (last_live_in_reg_marker_ != nullptr) {
      last_live_in_reg_marker_->AddSuccessor(new_node);
    }
    last_live_in_reg_marker_ = new_node;
  } else {
    if (last_live_in_reg_marker_ != nullptr) {
      last_live_in_reg_marker_->AddSuccessor(new_node);
    }

    // Make sure that instructions are not scheduled before the last
    // deoptimization point when they depend on it.
    if ((last_deopt_ != nullptr) && DependsOnDeoptimization(instr)) {
      last_deopt_->AddSuccessor(new_node);
    }

    // Instructions with side effects and memory operations can't be
    // reordered with respect to each other.
    if (HasSideEffect(instr)) {
      if (last_side_effect_instr_ != nullptr) {
        last_side_effect_instr_->AddSuccessor(new_node);
      }
      for (ScheduleGraphNode* load : pending_loads_) {
        load->AddSuccessor(new_node);
      }
      pending_loads_.clear();
      last_side_effect_instr_ = new_node;
    } else if (IsLoadOperation(instr)) {
      // Load operations can't be reordered with respect to instructions that
      // have side effects, but independent loads can be reordered relative to
      // each other.
      if (last_side_effect_instr_ != nullptr) {
        last_side_effect_instr_->AddSuccessor(new_node);
      }
      pending_loads_.push_back(new_node);
    } else if (instr->IsDeoptimizeCall()) {
      // Ensure that deopts are not reordered with respect to side-effect
      // instructions.
      if (last_side_effect_instr_ != nullptr) {
        last_side_effect_instr_->AddSuccessor(new_node);
      }
      last_deopt_ = new_node;
    }

    // Look for operand dependencies.
    for (size_t i = 0; i < instr->InputCount(); ++i) {
      const InstructionOperand* input = instr->InputAt(i);
      if (input->IsUnallocated()) {
        int32_t vreg = UnallocatedOperand::cast(input)->virtual_register();
        auto it = operands_map_.find(vreg);
        if (it != operands_map_.end()) {
          it->second->AddSuccessor(new_node);
        }
      }
    }

    // Record the virtual registers defined by this instruction.
    for (size_t i = 0; i < instr->OutputCount(); ++i) {
      const InstructionOperand* output = instr->OutputAt(i);
      if (output->IsUnallocated()) {
        operands_map_[UnallocatedOperand::cast(output)->virtual_register()] =
            new_node;
      } else if (output->IsConstant()) {
        operands_map_[ConstantOperand::cast(output)->virtual_register()] =
            new_node;
      }
    }
  }

  graph_.push_back(new_node);
}


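// List scheduling: nodes without predecessors start in the ready list. Each
// simulated cycle the queue either yields a candidate, which is emitted and
// whose successors' start cycles are raised past its latency, or yields
// nullptr, in which case the cycle counter simply advances until a node
// becomes ready.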
template <typename QueueType>
void InstructionScheduler::ScheduleBlock() {
  QueueType ready_list(this);

  // Compute total latencies so that we can schedule the critical path first.
  ComputeTotalLatencies();

  // Add nodes which don't have dependencies to the ready list.
  for (ScheduleGraphNode* node : graph_) {
    if (!node->HasUnscheduledPredecessor()) {
      ready_list.AddNode(node);
    }
  }

  // Go through the ready list and schedule the instructions.
  int cycle = 0;
  while (!ready_list.IsEmpty()) {
    ScheduleGraphNode* candidate = ready_list.PopBestCandidate(cycle);

    if (candidate != nullptr) {
      sequence()->AddInstruction(candidate->instruction());

      for (ScheduleGraphNode* successor : candidate->successors()) {
        successor->DropUnscheduledPredecessor();
        successor->set_start_cycle(
            std::max(successor->start_cycle(),
                     cycle + candidate->latency()));

        if (!successor->HasUnscheduledPredecessor()) {
          ready_list.AddNode(successor);
        }
      }
    }

    cycle++;
  }
}


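// Maps architecture-independent opcodes to scheduling flags; anything not
// handled here falls through to the target-specific
// GetTargetInstructionFlags. These flags drive the dependency edges created
// in AddInstruction.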
int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
  switch (instr->arch_opcode()) {
    case kArchNop:
    case kArchFramePointer:
    case kArchParentFramePointer:
    case kArchTruncateDoubleToI:
    case kArchStackSlot:
    case kArchDebugBreak:
    case kArchComment:
    case kIeee754Float64Acos:
    case kIeee754Float64Acosh:
    case kIeee754Float64Asin:
    case kIeee754Float64Asinh:
    case kIeee754Float64Atan:
    case kIeee754Float64Atanh:
    case kIeee754Float64Atan2:
    case kIeee754Float64Cbrt:
    case kIeee754Float64Cos:
    case kIeee754Float64Cosh:
    case kIeee754Float64Exp:
    case kIeee754Float64Expm1:
    case kIeee754Float64Log:
    case kIeee754Float64Log1p:
    case kIeee754Float64Log10:
    case kIeee754Float64Log2:
    case kIeee754Float64Pow:
    case kIeee754Float64Sin:
    case kIeee754Float64Sinh:
    case kIeee754Float64Tan:
    case kIeee754Float64Tanh:
      return kNoOpcodeFlags;

    case kArchStackPointer:
      // The ArchStackPointer instruction loads the current stack pointer
      // value; it must not be reordered with respect to instructions with
      // side effects.
      return kIsLoadOperation;

    case kArchPrepareCallCFunction:
    case kArchPrepareTailCall:
    case kArchCallCFunction:
    case kArchCallCodeObject:
    case kArchCallJSFunction:
      return kHasSideEffect;

    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject:
    case kArchTailCallJSFunctionFromJSFunction:
    case kArchTailCallJSFunction:
    case kArchTailCallAddress:
      return kHasSideEffect | kIsBlockTerminator;

    case kArchDeoptimize:
    case kArchJmp:
    case kArchLookupSwitch:
    case kArchTableSwitch:
    case kArchRet:
    case kArchThrowTerminator:
      return kIsBlockTerminator;

    case kCheckedLoadInt8:
    case kCheckedLoadUint8:
    case kCheckedLoadInt16:
    case kCheckedLoadUint16:
    case kCheckedLoadWord32:
    case kCheckedLoadWord64:
    case kCheckedLoadFloat32:
    case kCheckedLoadFloat64:
      return kIsLoadOperation;

    case kCheckedStoreWord8:
    case kCheckedStoreWord16:
    case kCheckedStoreWord32:
    case kCheckedStoreWord64:
    case kCheckedStoreFloat32:
    case kCheckedStoreFloat64:
    case kArchStoreWithWriteBarrier:
      return kHasSideEffect;

    case kAtomicLoadInt8:
    case kAtomicLoadUint8:
    case kAtomicLoadInt16:
    case kAtomicLoadUint16:
    case kAtomicLoadWord32:
      return kIsLoadOperation;

    case kAtomicStoreWord8:
    case kAtomicStoreWord16:
    case kAtomicStoreWord32:
      return kHasSideEffect;

#define CASE(Name) case k##Name:
    TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
      return GetTargetInstructionFlags(instr);
  }

  UNREACHABLE();
  return kNoOpcodeFlags;
}


bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
  return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
          (instr->flags_mode() == kFlags_branch));
}


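// graph_ holds the nodes in instruction order and every edge points forward,
// so iterating over the reversed graph visits all successors of a node before
// the node itself. A node's total latency is its own latency plus the largest
// total latency among its successors, i.e. the length of the longest
// dependency chain it starts.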
void InstructionScheduler::ComputeTotalLatencies() {
  for (ScheduleGraphNode* node : base::Reversed(graph_)) {
    int max_latency = 0;

    for (ScheduleGraphNode* successor : node->successors()) {
      DCHECK(successor->total_latency() != -1);
      if (successor->total_latency() > max_latency) {
        max_latency = successor->total_latency();
      }
    }

    node->set_total_latency(max_latency + node->latency());
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8