// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
#include "src/register-configuration.h"

#include "src/crankshaft/x87/lithium-codegen-x87.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


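// Adds a move to the worklist and updates the per-register use counts for
// its source and destination.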
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


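// Returns a general-purpose register other than |reg| that is the
// destination of a remaining move but the source of none: its current value
// is about to be overwritten anyway, so it can be clobbered immediately.
// Returns no_reg if no such register exists.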
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : reg.code();
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
        code != skip_index) {
      return Register::from_code(code);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] != 0) return false;
    if (destination_uses_[code] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

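// Restores any register that was spilled to create a temp and clears the
// move worklist; called once all moves have been performed.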
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


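// If |operand| is the register that was spilled to create a temp, pop its
// original value back off the stack before the operand is used.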
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
}


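// Returns a general-purpose register usable as a temp, spilling one to the
// stack if no free register is available. A spilled register is restored by
// EnsureRestored() or, at the latest, by Finish().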
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::from_code(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
      Register scratch = Register::from_code(code);
      __ push(scratch);
      spilled_register_ = code;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  spilled_register_ = config->GetAllocatableGeneralCode(0);
  Register scratch = Register::from_code(spilled_register_);
  __ push(scratch);
  return scratch;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
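      // The x87 FPU has no way to load an arbitrary double immediate into a
      // register, so materialize the constant through memory: push its two
      // 32-bit halves, load from (esp), then pop the slot off again.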
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      __ push(Immediate(upper));
      __ push(Immediate(lower));
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    // Store the value from the x87 register into the destination, which is
    // either a double stack slot or another x87 register.
    if (destination->IsDoubleStackSlot()) {
      Operand dst = cgen_->ToOperand(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    } else {
      X87Register dst = cgen_->ToX87Register(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    // Load from the stack slot onto the top of the floating-point stack and
    // then store in the destination. If the destination is a double
    // register, it represents the top of the stack and nothing more needs
    // to be done.
    if (destination->IsDoubleStackSlot()) {
      Register tmp = EnsureTempRegister();
      Operand src0 = cgen_->ToOperand(source);
      Operand src1 = cgen_->HighOperand(source);
      Operand dst0 = cgen_->ToOperand(destination);
      Operand dst1 = cgen_->HighOperand(destination);
      // Copy the double from source to destination word by word through tmp.
      __ mov(tmp, src0);
      __ mov(dst0, tmp);
      __ mov(tmp, src1);
      __ mov(dst1, tmp);
    } else {
      Operand src = cgen_->ToOperand(source);
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, src);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
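      // No free register: swap the register and the memory word in place
      // using the three-XOR trick.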
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
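      // XOR-swap tmp0 (holding dst) with src in memory: afterwards src
      // holds the old dst and tmp0 holds the old src, which is then written
      // back to dst.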
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source equal to either
  // this move's source or destination needs to have its source changed to
  // reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87