// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_S390_CODE_STUBS_S390_H_
#define V8_S390_CODE_STUBS_S390_H_

#include "src/s390/frames-s390.h"

namespace v8 {
namespace internal {

void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);

class StringHelper : public AllStatic {
 public:
  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
                                     Register src, Register count,
                                     Register scratch,
                                     String::Encoding encoding);

  // Compares two flat one-byte strings and returns result in r0.
  static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
                                                Register left, Register right,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3);

  // Compares two flat one-byte strings for equality and returns result in r0.
  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2);

 private:
  static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register length,
                                              Register scratch1,
                                              Label* chars_not_equal);

  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
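
// Illustrative use (not part of this header's contract): comparing two flat
// one-byte strings held in r2 and r3, assuming r4-r6 are free as scratch
// registers:
//   StringHelper::GenerateCompareFlatOneByteStrings(masm, r2, r3, r4, r5, r6);
// The result is returned in r0, as described above.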

class StoreRegistersStateStub : public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};

class RestoreRegistersStateStub : public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};

class RecordWriteStub : public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate, Register object, Register value,
                  Register address, RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

  bool SometimesSetsUpAFrame() override { return false; }

  // Patches the condition mask of the branch at |pos|, e.g. turning an
  // always-taken branch into a NOP branch and vice versa.
  static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
    int32_t instrLen = masm->instr_length_at(pos);
    DCHECK(instrLen == 4 || instrLen == 6);

    if (instrLen == 4) {
      // BRC - branch mask @ bits 23-20.
      FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
      masm->instr_at_put<FourByteInstr>(
          pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
    } else {
      // BRCL - branch mask @ bits 39-36.
      SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
      masm->instr_at_put<SixByteInstr>(
          pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
    }
  }
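
  // Illustrative use: a zero condition mask makes BRC/BRCL fall through
  // unconditionally, so the first branch of a stub can be disabled with
  //   PatchBranchCondMask(&masm, 0, CC_NOP);
  // and re-enabled with
  //   PatchBranchCondMask(&masm, 0, CC_ALWAYS);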

  static bool isBranchNop(SixByteInstr instr, int instrLength) {
    // A BRC (4-byte) or BRCL (6-byte) branch with a 0x0 condition mask never
    // branches, i.e. it is effectively a NOP.
    if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
        (6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
      return true;
    }
    return false;
  }

  static Mode GetMode(Code* stub) {
    int32_t first_instr_length =
        Instruction::InstructionLength(stub->instruction_start());
    int32_t second_instr_length = Instruction::InstructionLength(
        stub->instruction_start() + first_instr_length);

    uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
    uint64_t second_instr =
        Assembler::instr_at(stub->instruction_start() + first_instr_length);

    DCHECK(first_instr_length == 4 || first_instr_length == 6);
    DCHECK(second_instr_length == 4 || second_instr_length == 6);

    bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
    bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);

    // STORE_BUFFER_ONLY has a NOP on both branches.
    if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
    // INCREMENTAL_COMPACTION has a NOP on the second branch only.
    if (isFirstInstrNOP && !isSecondInstrNOP) return INCREMENTAL_COMPACTION;
    // INCREMENTAL has a NOP on the first branch only.
    if (!isFirstInstrNOP && isSecondInstrNOP) return INCREMENTAL;

    DCHECK(false);
    return STORE_BUFFER_ONLY;
  }
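
  // Mode encoding recovered by GetMode(), summarized from the checks above:
  //   first branch | second branch | mode
  //   -------------+---------------+------------------------
  //   NOP          | NOP           | STORE_BUFFER_ONLY
  //   NOP          | taken         | INCREMENTAL_COMPACTION
  //   taken        | NOP           | INCREMENTAL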

  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
                        stub->instruction_size(), CodeObjectRequired::kNo);

    // Get the instruction lengths of the two branches.
    int32_t first_instr_length = masm.instr_length_at(0);
    int32_t second_instr_length = masm.instr_length_at(first_instr_length);

    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);

        PatchBranchCondMask(&masm, 0, CC_NOP);
        PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchBranchCondMask(&masm, 0, CC_ALWAYS);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
        break;
    }
    DCHECK(GetMode(stub) == mode);
    Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
                           first_instr_length + second_instr_length);
  }
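
  // Hypothetical caller-side sketch: the incremental marker switches a stub
  // from the fast path to the incremental path with
  //   if (GetMode(stub) == STORE_BUFFER_ONLY) Patch(stub, INCREMENTAL);
  // and back to STORE_BUFFER_ONLY once marking finishes.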

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class for freeing up 3 scratch registers. The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object, Register address, Register scratch0)
        : object_(object), address_(address), scratch0_(scratch0) {
      DCHECK(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->push(r14);
      masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        // Save all volatile FP registers except d0.
        masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        // Restore all volatile FP registers except d0.
        masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
      }
      masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
      masm->pop(r14);
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };
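
  // Illustrative pairing (sketch; |fp_mode| stands for the stub's
  // SaveFPRegsMode): the save/restore helpers above are expected to nest:
  //   regs_.Save(masm);                              // push scratch1
  //   regs_.SaveCallerSaveRegisters(masm, fp_mode);  // before calling into C
  //   ...
  //   regs_.RestoreCallerSaveRegisters(masm, fp_mode);
  //   regs_.Restore(masm);                           // pop scratch1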

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  inline Major MajorKey() const final { return RecordWrite; }

  void Generate(MacroAssembler* masm) override;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  void Activate(Code* code) override {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

  class ObjectBits : public BitField<int, 0, 4> {};
  class ValueBits : public BitField<int, 4, 4> {};
  class AddressBits : public BitField<int, 8, 4> {};
  class RememberedSetActionBits
      : public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
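
  // Resulting minor_key_ layout, read off the BitField declarations above
  // (bits 12-14 are unused):
  //   bits 0-3:  object register code
  //   bits 4-7:  value register code
  //   bits 8-11: address register code
  //   bit  15:   RememberedSetAction
  //   bit  16:   SaveFPRegsMode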

  Label slow_;
  RegisterAllocation regs_;

  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};

// Trampoline stub to call into native code. To call safely into native code
// in the presence of a compacting GC (which can move code objects), we need
// to keep the code that called into native code pinned in memory. Currently
// the simplest approach is to generate such a stub early enough that it can
// never be moved by the GC.
class DirectCEntryStub : public PlatformCodeStub {
 public:
  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  bool NeedsImmovableCode() override { return true; }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
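
// Illustrative call-site sketch (the register choice here is hypothetical):
//   DirectCEntryStub stub(isolate);
//   stub.GenerateCall(masm, ip);  // ip holds the target native entry point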

class NameDictionaryLookupStub : public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
      : PlatformCodeStub(isolate) {
    minor_key_ = LookupModeBits::encode(mode);
  }

  static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
                                     Label* done, Register receiver,
                                     Register properties, Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
                                     Label* done, Register elements,
                                     Register name, Register r0, Register r1);

  bool SometimesSetsUpAFrame() override { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }

  class LookupModeBits : public BitField<LookupMode, 0, 1> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};

class FloatingPointHelper : public AllStatic {
 public:
  enum Destination { kFPRegisters, kCoreRegisters };

  // Loads smis from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers, VFP3 must be supported. If core
  // registers are requested when VFP3 is supported, d6 and d7 will be
  // scratched.
  static void LoadSmis(MacroAssembler* masm, Register scratch1,
                       Register scratch2);

  // Loads objects from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers, VFP3 must be supported. If core
  // registers are requested when VFP3 is supported, d6 and d7 will still be
  // scratched. If either r0 or r1 is not a number (neither a smi nor a heap
  // number object), the not_number label is jumped to with r0 and r1 intact.
  static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
                           Register scratch1, Register scratch2,
                           Label* not_number);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1.
  static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
                                   Register dst, Register heap_number_map,
                                   Register scratch1, Register scratch2,
                                   Register scratch3,
                                   DoubleRegister double_scratch,
                                   Label* not_int32);
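
  // For reference, ES5 ToInt32 reduces the number modulo 2^32 and then wraps
  // into the signed range, so e.g. ToInt32(2^32 + 5) == 5 and
  // ToInt32(2^31) == -2^31.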

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result in |double_dst|.
  static void ConvertIntToDouble(MacroAssembler* masm, Register src,
                                 DoubleRegister double_dst);
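
  // Illustrative use (assuming r2 holds an untagged integer and d1 is free):
  //   FloatingPointHelper::ConvertIntToDouble(masm, r2, d1);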

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result in |double_dst|.
  static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
                                         DoubleRegister double_dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|.
  static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
                                const Register src);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
                                      DoubleRegister double_dst,
                                      DoubleRegister double_scratch,
                                      Register heap_number_map,
                                      Register scratch1, Register scratch2,
                                      Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  // scratch3 is not used when VFP3 is supported.
  static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
                                Register dst, Register heap_number_map,
                                Register scratch1, Register scratch2,
                                Register scratch3,
                                DoubleRegister double_scratch0,
                                DoubleRegister double_scratch1,
                                Label* not_int32);

  // Generates non-VFP3 code to check whether a double can be exactly
  // represented by a 32-bit integer. This does not check for 0 or -0, which
  // need to be checked for separately.
  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
  // through otherwise.
  // src1 and src2 will be clobbered.
  //
  // Expected input:
  // - src1: higher (exponent) part of the double value.
  // - src2: lower (mantissa) part of the double value.
  // Output status:
  // - dst: the 32 higher bits of the mantissa (mantissa[51:20]).
  // - src2: contains 1.
  // - other registers are clobbered.
  static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
                                   Register src2, Register dst,
                                   Register scratch, Label* not_int32);
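
  // For orientation (IEEE-754 layout, not a statement about this code's
  // internals): the higher word of a double holds the sign bit, the 11
  // exponent bits and mantissa[51:32]; the lower word holds mantissa[31:0].
  // The check essentially verifies that the unbiased exponent is small enough
  // and that no fractional mantissa bits remain set.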

  // Generates code to call a C function to do a double operation using core
  // registers. (Used when VFP3 is not supported.)
  // This code never falls through, but returns with a heap number containing
  // the result in r0.
  // Register heap_number_result must be a heap number in which the
  // result of the operation will be stored.
  // Requires the following layout on entry:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
  static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
                                          Register heap_number_result,
                                          Register scratch);

 private:
  static void LoadNumber(MacroAssembler* masm, Register object,
                         DoubleRegister dst, Register heap_number_map,
                         Register scratch1, Register scratch2,
                         Label* not_number);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_S390_CODE_STUBS_S390_H_