blob: 258ac40a2606c5e6c428600340993615deb6e1b8 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2013 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
#include <stdlib.h>

#include <iostream>  // NOLINT(readability/streams)
#include <iterator>
#include <vector>

#include "src/v8.h"
#include "test/cctest/cctest.h"

#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/mips64/simulator-mips64.h"
38
39
40using namespace v8::internal;
41
42typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000043typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
Ben Murdochda12d292016-06-02 14:46:10 +010044typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
Ben Murdochb8a8cc12014-11-26 15:28:44 +000045
46#define __ masm->
47
48
49static byte to_non_zero(int n) {
50 return static_cast<unsigned>(n) % 255 + 1;
51}
52
53
54static bool all_zeroes(const byte* beg, const byte* end) {
55 CHECK(beg);
56 CHECK(beg <= end);
57 while (beg < end) {
58 if (*beg++ != 0)
59 return false;
60 }
61 return true;
62}
63
// Exercises MacroAssembler::ByteSwapSigned/ByteSwapUnsigned for operand
// sizes 8, 4, 2 and 1 by swapping seven struct fields in place and comparing
// against precomputed bit patterns.
TEST(BYTESWAP) {
  DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // One input/output slot per ByteSwap* variant under test.
  struct T {
    int64_t r1;
    int64_t r2;
    int64_t r3;
    int64_t r4;
    int64_t r5;
    int64_t r6;
    int64_t r7;
  };
  T t;

  MacroAssembler assembler(isolate, NULL, 0,
                           v8::internal::CodeObjectRequired::kYes);

  MacroAssembler* masm = &assembler;

  // a0 holds &t on entry (F3 calling convention). Each sequence loads a
  // field, swaps it with the given operand size, and stores it back.
  __ ld(a4, MemOperand(a0, offsetof(T, r1)));
  __ nop();
  __ ByteSwapSigned(a4, 8);
  __ sd(a4, MemOperand(a0, offsetof(T, r1)));

  __ ld(a4, MemOperand(a0, offsetof(T, r2)));
  __ nop();
  __ ByteSwapSigned(a4, 4);
  __ sd(a4, MemOperand(a0, offsetof(T, r2)));

  __ ld(a4, MemOperand(a0, offsetof(T, r3)));
  __ nop();
  __ ByteSwapSigned(a4, 2);
  __ sd(a4, MemOperand(a0, offsetof(T, r3)));

  __ ld(a4, MemOperand(a0, offsetof(T, r4)));
  __ nop();
  __ ByteSwapSigned(a4, 1);
  __ sd(a4, MemOperand(a0, offsetof(T, r4)));

  __ ld(a4, MemOperand(a0, offsetof(T, r5)));
  __ nop();
  __ ByteSwapUnsigned(a4, 1);
  __ sd(a4, MemOperand(a0, offsetof(T, r5)));

  __ ld(a4, MemOperand(a0, offsetof(T, r6)));
  __ nop();
  __ ByteSwapUnsigned(a4, 2);
  __ sd(a4, MemOperand(a0, offsetof(T, r6)));

  __ ld(a4, MemOperand(a0, offsetof(T, r7)));
  __ nop();
  __ ByteSwapUnsigned(a4, 4);
  __ sd(a4, MemOperand(a0, offsetof(T, r7)));

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  masm->GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  ::F3 f = FUNCTION_CAST<::F3>(code->entry());
  t.r1 = 0x5612FFCD9D327ACC;
  t.r2 = 0x781A15C3;
  t.r3 = 0xFCDE;
  t.r4 = 0x9F;
  t.r5 = 0x9F;
  t.r6 = 0xFCDE;
  t.r7 = 0xC81A15C3;
  Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
  USE(dummy);

  // Per the expected constants: r1 is the full 8-byte reversal; for the
  // narrower widths the swapped bytes land in the most-significant end,
  // with one-fill below for the signed variants (r2-r4) and zero-fill for
  // the unsigned ones (r5-r7).
  CHECK_EQ(static_cast<int64_t>(0xCC7A329DCDFF1256), t.r1);
  CHECK_EQ(static_cast<int64_t>(0xC3151A7800000000), t.r2);
  CHECK_EQ(static_cast<int64_t>(0xDEFCFFFFFFFFFFFF), t.r3);
  CHECK_EQ(static_cast<int64_t>(0x9FFFFFFFFFFFFFFF), t.r4);
  CHECK_EQ(static_cast<int64_t>(0x9F00000000000000), t.r5);
  CHECK_EQ(static_cast<int64_t>(0xDEFC000000000000), t.r6);
  CHECK_EQ(static_cast<int64_t>(0xC3151AC800000000), t.r7);
}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000147
// Fuzzes MacroAssembler::CopyBytes over many (src offset, dest offset, size)
// combinations, verifying the copied bytes, the updated src/dest registers,
// and that no byte outside the target range was written.
TEST(CopyBytes) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);

  const int data_size = 1 * KB;
  size_t act_size;

  // Allocate two blocks to copy data between.
  byte* src_buffer =
      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
  CHECK(src_buffer);
  CHECK(act_size >= static_cast<size_t>(data_size));
  byte* dest_buffer =
      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
  CHECK(dest_buffer);
  CHECK(act_size >= static_cast<size_t>(data_size));

  // Storage for a0 and a1. The addresses of these locals are baked into the
  // generated code below, so the stub is only valid within this test.
  byte* a0_;
  byte* a1_;

  MacroAssembler assembler(isolate, NULL, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;

  // Code to be generated: The stuff in CopyBytes followed by a store of a0 and
  // a1, respectively.
  __ CopyBytes(a0, a1, a2, a3);
  __ li(a2, Operand(reinterpret_cast<int64_t>(&a0_)));
  __ li(a3, Operand(reinterpret_cast<int64_t>(&a1_)));
  __ sd(a0, MemOperand(a2));
  __ jr(ra);
  __ sd(a1, MemOperand(a3));  // Executed in the branch delay slot.

  CodeDesc desc;
  masm->GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  ::F f = FUNCTION_CAST< ::F>(code->entry());

  // Initialise source data with non-zero bytes.
  for (int i = 0; i < data_size; i++) {
    src_buffer[i] = to_non_zero(i);
  }

  // Vary the start offsets within the first `fuzz` bytes of each buffer.
  const int fuzz = 11;

  for (int size = 0; size < 600; size++) {
    for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
      for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
        memset(dest_buffer, 0, data_size);
        CHECK(dest + size < dest_buffer + data_size);
        (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(src),
                                  reinterpret_cast<int64_t>(dest), size, 0, 0);
        // a0 and a1 should point at the first byte after the copied data.
        CHECK_EQ(src + size, a0_);
        CHECK_EQ(dest + size, a1_);
        // Check that we haven't written outside the target area.
        CHECK(all_zeroes(dest_buffer, dest));
        CHECK(all_zeroes(dest + size, dest_buffer + data_size));
        // Check the target area.
        CHECK_EQ(0, memcmp(src, dest, size));
      }
    }
  }

  // Check that the source data hasn't been clobbered.
  for (int i = 0; i < data_size; i++) {
    CHECK(src_buffer[i] == to_non_zero(i));
  }
}
221
222
// Verifies that MacroAssembler::li materializes each of the 64 single-zero-bit
// constants ~(1 << i) correctly by storing them to a result array and
// comparing against the reference values.
TEST(LoadConstants) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);

  int64_t refConstants[64];
  int64_t result[64];

  int64_t mask = 1;
  for (int i = 0; i < 64; i++) {
    refConstants[i] = ~(mask << i);
  }

  MacroAssembler assembler(isolate, NULL, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;

  // a0 holds the address of `result` on entry; a4 walks through it.
  __ mov(a4, a0);
  for (int i = 0; i < 64; i++) {
    // Load constant.
    __ li(a5, Operand(refConstants[i]));
    __ sd(a5, MemOperand(a4));
    __ Daddu(a4, a4, Operand(kPointerSize));
  }

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  masm->GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  ::F f = FUNCTION_CAST< ::F>(code->entry());
  (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(result), 0, 0,
                            0, 0);
  // Check results.
  for (int i = 0; i < 64; i++) {
    CHECK(refConstants[i] == result[i]);
  }
}
264
265
// Checks that loading a label address with li(..., ADDRESS_LOAD) emits
// exactly 4 instructions and that an indirect jump through the loaded
// address reaches the intended target (skipping the stop() traps).
TEST(LoadAddress) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);

  MacroAssembler assembler(isolate, NULL, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;
  Label to_jump, skip;
  __ mov(a4, a0);

  // Lay out the jump target before the address-load sequence, then jump
  // over it so execution starts at `skip`.
  __ Branch(&skip);
  __ bind(&to_jump);
  __ nop();
  __ nop();
  __ jr(ra);
  __ nop();
  __ bind(&skip);
  __ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD);
  // The fixed-size address load must occupy exactly 4 instructions.
  int check_size = masm->InstructionsGeneratedSince(&skip);
  CHECK_EQ(check_size, 4);
  __ jr(a4);
  __ nop();
  // If the indirect jump misses its target, execution falls into a trap.
  __ stop("invalid");
  __ stop("invalid");
  __ stop("invalid");
  __ stop("invalid");
  __ stop("invalid");


  CodeDesc desc;
  masm->GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  ::F f = FUNCTION_CAST< ::F>(code->entry());
  (void)CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0);
  // Check results.
}
305
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000306
// Builds a 512-entry switch table after ~32K padding instructions to force
// trampoline emission and long-branch mode, then checks every dispatch.
TEST(jump_tables4) {
  // Similar to test-assembler-mips jump_tables1, with extra test for branch
  // trampoline required before emission of the dd table (where trampolines are
  // blocked), and proper transition to long-branch mode.
  // Regression test for v8:4294.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assembler(isolate, nullptr, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;

  const int kNumCases = 512;
  int values[kNumCases];
  isolate->random_number_generator()->NextBytes(values, sizeof(values));
  Label labels[kNumCases];
  Label near_start, end, done;

  __ Push(ra);
  __ mov(v0, zero_reg);

  __ Branch(&end);
  __ bind(&near_start);

  // Generate slightly less than 32K instructions, which will soon require
  // trampoline for branch distance fixup.
  for (int i = 0; i < 32768 - 256; ++i) {
    __ addiu(v0, v0, 1);
  }

  __ GenerateSwitchTable(a0, kNumCases,
                         [&labels](size_t i) { return labels + i; });

  // One case body per table entry: load the expected value and return.
  for (int i = 0; i < kNumCases; ++i) {
    __ bind(&labels[i]);
    __ li(v0, values[i]);
    __ Branch(&done);
  }

  __ bind(&done);
  __ Pop(ra);
  __ jr(ra);
  __ nop();

  __ bind(&end);
  __ Branch(&near_start);

  CodeDesc desc;
  masm->GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
  code->Print(std::cout);
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  for (int i = 0; i < kNumCases; ++i) {
    int64_t res = reinterpret_cast<int64_t>(
        CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
    ::printf("f(%d) = %" PRId64 "\n", i, res);
    CHECK_EQ(values[i], res);
  }
}
369
370
// r6-only variant: builds the dd jump table right after a compact branch
// (bc), relying on the forbidden-slot guard for 8-byte table alignment.
TEST(jump_tables5) {
  if (kArchVariant != kMips64r6) return;

  // Similar to test-assembler-mips jump_tables1, with extra test for emitting a
  // compact branch instruction before emission of the dd table.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assembler(isolate, nullptr, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;

  const int kNumCases = 512;
  int values[kNumCases];
  isolate->random_number_generator()->NextBytes(values, sizeof(values));
  Label labels[kNumCases];
  Label done;

  __ Push(ra);

  // Opposite of Align(8) as we have unaligned number of instructions in the
  // following block before the first dd().
  if ((masm->pc_offset() & 7) == 0) {
    __ nop();
  }

  {
    // Keep the trampoline pool out of the table and pin the code size so the
    // addiupc offset below stays valid.
    __ BlockTrampolinePoolFor(kNumCases * 2 + 6 + 1);
    PredictableCodeSizeScope predictable(
        masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));

    // at = &table; at += a0 * 8; dispatch through the loaded entry.
    __ addiupc(at, 6 + 1);
    __ Dlsa(at, at, a0, 3);
    __ ld(at, MemOperand(at));
    __ jalr(at);
    __ nop();  // Branch delay slot nop.
    __ bc(&done);
    // A nop instruction must be generated by the forbidden slot guard
    // (Assembler::dd(Label*)) so the first label goes to an 8 bytes aligned
    // location.
    for (int i = 0; i < kNumCases; ++i) {
      __ dd(&labels[i]);
    }
  }

  for (int i = 0; i < kNumCases; ++i) {
    __ bind(&labels[i]);
    __ li(v0, values[i]);
    __ jr(ra);
    __ nop();
  }

  __ bind(&done);
  __ Pop(ra);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  masm->GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
  code->Print(std::cout);
#endif
  F1 f = FUNCTION_CAST<F1>(code->entry());
  for (int i = 0; i < kNumCases; ++i) {
    int64_t res = reinterpret_cast<int64_t>(
        CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
    ::printf("f(%d) = %" PRId64 "\n", i, res);
    CHECK_EQ(values[i], res);
  }
}
443
444
// Assembles a single Lsa (32-bit load-scaled-address) with the given shift
// amount, runs it with rt/rs in a0/a1, and returns the raw v0 result.
static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assembler(isolate, nullptr, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;

  __ Lsa(v0, a0, a1, sa);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assembler.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  F1 f = FUNCTION_CAST<F1>(code->entry());

  uint64_t res = reinterpret_cast<uint64_t>(
      CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));

  return res;
}
468
469
// Table-driven test of Lsa (result = rt + (rs << sa), 32-bit semantics,
// sign-extended to 64 bits), covering shift and addition overflow cases.
TEST(Lsa) {
  CcTest::InitializeVM();
  struct TestCaseLsa {
    int32_t rt;
    int32_t rs;
    uint8_t sa;
    uint64_t expected_res;
  };

  struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
                             {0x4, 0x1, 1, 0x6},
                             {0x4, 0x1, 2, 0x8},
                             {0x4, 0x1, 3, 0xc},
                             {0x4, 0x1, 4, 0x14},
                             {0x4, 0x1, 5, 0x24},
                             {0x0, 0x1, 1, 0x2},
                             {0x0, 0x1, 2, 0x4},
                             {0x0, 0x1, 3, 0x8},
                             {0x0, 0x1, 4, 0x10},
                             {0x0, 0x1, 5, 0x20},
                             {0x4, 0x0, 1, 0x4},
                             {0x4, 0x0, 2, 0x4},
                             {0x4, 0x0, 3, 0x4},
                             {0x4, 0x0, 4, 0x4},
                             {0x4, 0x0, 5, 0x4},

                             // Shift overflow.
                             {0x4, INT32_MAX, 1, 0x2},
                             {0x4, INT32_MAX >> 1, 2, 0x0},
                             {0x4, INT32_MAX >> 2, 3, 0xfffffffffffffffc},
                             {0x4, INT32_MAX >> 3, 4, 0xfffffffffffffff4},
                             {0x4, INT32_MAX >> 4, 5, 0xffffffffffffffe4},

                             // Signed addition overflow.
                             {INT32_MAX - 1, 0x1, 1, 0xffffffff80000000},
                             {INT32_MAX - 3, 0x1, 2, 0xffffffff80000000},
                             {INT32_MAX - 7, 0x1, 3, 0xffffffff80000000},
                             {INT32_MAX - 15, 0x1, 4, 0xffffffff80000000},
                             {INT32_MAX - 31, 0x1, 5, 0xffffffff80000000},

                             // Addition overflow.
                             {-2, 0x1, 1, 0x0},
                             {-4, 0x1, 2, 0x0},
                             {-8, 0x1, 3, 0x0},
                             {-16, 0x1, 4, 0x0},
                             {-32, 0x1, 5, 0x0}};

  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
  for (size_t i = 0; i < nr_test_cases; ++i) {
    uint64_t res = run_lsa(tc[i].rt, tc[i].rs, tc[i].sa);
    PrintF("0x%" PRIx64 " =? 0x%" PRIx64 " == Lsa(v0, %x, %x, %hhu)\n",
           tc[i].expected_res, res, tc[i].rt, tc[i].rs, tc[i].sa);
    CHECK_EQ(tc[i].expected_res, res);
  }
}
525
526
// Assembles a single Dlsa (64-bit load-scaled-address) with the given shift
// amount, runs it with rt/rs in a0/a1, and returns the raw v0 result.
static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assembler(isolate, nullptr, 0,
                           v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assembler;

  __ Dlsa(v0, a0, a1, sa);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assembler.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  // ::F takes two int64_t arguments, matching the 64-bit operands here.
  ::F f = FUNCTION_CAST<::F>(code->entry());

  uint64_t res = reinterpret_cast<uint64_t>(
      CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));

  return res;
}
550
551
// Table-driven test of Dlsa (result = rt + (rs << sa), full 64-bit
// semantics), covering shift and addition overflow cases.
TEST(Dlsa) {
  CcTest::InitializeVM();
  struct TestCaseLsa {
    int64_t rt;
    int64_t rs;
    uint8_t sa;
    uint64_t expected_res;
  };

  struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
                             {0x4, 0x1, 1, 0x6},
                             {0x4, 0x1, 2, 0x8},
                             {0x4, 0x1, 3, 0xc},
                             {0x4, 0x1, 4, 0x14},
                             {0x4, 0x1, 5, 0x24},
                             {0x0, 0x1, 1, 0x2},
                             {0x0, 0x1, 2, 0x4},
                             {0x0, 0x1, 3, 0x8},
                             {0x0, 0x1, 4, 0x10},
                             {0x0, 0x1, 5, 0x20},
                             {0x4, 0x0, 1, 0x4},
                             {0x4, 0x0, 2, 0x4},
                             {0x4, 0x0, 3, 0x4},
                             {0x4, 0x0, 4, 0x4},
                             {0x4, 0x0, 5, 0x4},

                             // Shift overflow.
                             {0x4, INT64_MAX, 1, 0x2},
                             {0x4, INT64_MAX >> 1, 2, 0x0},
                             {0x4, INT64_MAX >> 2, 3, 0xfffffffffffffffc},
                             {0x4, INT64_MAX >> 3, 4, 0xfffffffffffffff4},
                             {0x4, INT64_MAX >> 4, 5, 0xffffffffffffffe4},

                             // Signed addition overflow.
                             {INT64_MAX - 1, 0x1, 1, 0x8000000000000000},
                             {INT64_MAX - 3, 0x1, 2, 0x8000000000000000},
                             {INT64_MAX - 7, 0x1, 3, 0x8000000000000000},
                             {INT64_MAX - 15, 0x1, 4, 0x8000000000000000},
                             {INT64_MAX - 31, 0x1, 5, 0x8000000000000000},

                             // Addition overflow.
                             {-2, 0x1, 1, 0x0},
                             {-4, 0x1, 2, 0x0},
                             {-8, 0x1, 3, 0x0},
                             {-16, 0x1, 4, 0x0},
                             {-32, 0x1, 5, 0x0}};

  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
  for (size_t i = 0; i < nr_test_cases; ++i) {
    uint64_t res = run_dlsa(tc[i].rt, tc[i].rs, tc[i].sa);
    PrintF("0x%" PRIx64 " =? 0x%" PRIx64 " == Dlsa(v0, %" PRIx64 ", %" PRIx64
           ", %hhu)\n",
           tc[i].expected_res, res, tc[i].rt, tc[i].rs, tc[i].sa);
    CHECK_EQ(tc[i].expected_res, res);
  }
}
608
// Boundary and mid-range uint32 inputs for the Cvt/Trunc round-trip tests:
// zero, small values, values around the INT32_MAX/INT32_MIN bit patterns,
// and UINT32_MAX.
static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
  static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
                                     0x7fffffff, 0x80000000, 0x80000001,
                                     0x80ffff00, 0x8fffffff, 0xffffffff};
  // std::begin/std::end instead of the arraysize() macro: same range,
  // standard-library idiom, no project macro dependence.
  return std::vector<uint32_t>(std::begin(kValues), std::end(kValues));
}
615
// Boundary and mid-range int32 inputs for the Cvt/Trunc round-trip tests,
// including INT32_MAX, INT32_MIN and -1 bit patterns.
static const std::vector<int32_t> cvt_trunc_int32_test_values() {
  static const int32_t kValues[] = {
      static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
      static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
      static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
      static_cast<int32_t>(0x80ffff00), static_cast<int32_t>(0x8fffffff),
      static_cast<int32_t>(0xffffffff)};
  // std::begin/std::end instead of the arraysize() macro.
  return std::vector<int32_t>(std::begin(kValues), std::end(kValues));
}
625
// Boundary and mid-range uint64 inputs for the Cvt/Trunc round-trip tests:
// zero, small values, values around the INT64_MAX/INT64_MIN bit patterns,
// and UINT64_MAX.
static const std::vector<uint64_t> cvt_trunc_uint64_test_values() {
  static const uint64_t kValues[] = {
      0x0000000000000000, 0x0000000000000001, 0x0000ffffffff0000,
      0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001,
      0x8000ffffffff0000, 0x8fffffffffffffff, 0xffffffffffffffff};
  // std::begin/std::end instead of the arraysize() macro.
  return std::vector<uint64_t>(std::begin(kValues), std::end(kValues));
}
633
// Boundary and mid-range int64 inputs for the Cvt/Trunc round-trip tests,
// including INT64_MAX, INT64_MIN and -1 bit patterns.
static const std::vector<int64_t> cvt_trunc_int64_test_values() {
  static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000),
                                    static_cast<int64_t>(0x0000000000000001),
                                    static_cast<int64_t>(0x0000ffffffff0000),
                                    static_cast<int64_t>(0x7fffffffffffffff),
                                    static_cast<int64_t>(0x8000000000000000),
                                    static_cast<int64_t>(0x8000000000000001),
                                    static_cast<int64_t>(0x8000ffffffff0000),
                                    static_cast<int64_t>(0x8fffffffffffffff),
                                    static_cast<int64_t>(0xffffffffffffffff)};
  // std::begin/std::end instead of the arraysize() macro.
  return std::vector<int64_t>(std::begin(kValues), std::end(kValues));
}
646
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }

// Declares a local vector filled from |test_vector| and iterates |var|
// (a std::vector iterator) over it; the loop body dereferences |var|.
#define FOR_INPUTS(ctype, itype, var, test_vector)           \
  std::vector<ctype> var##_vec = test_vector();              \
  for (std::vector<ctype>::iterator var = var##_vec.begin(); \
       var != var##_vec.end(); ++var)

// As FOR_INPUTS, but additionally walks |var2| over the same vector in
// reverse, in lockstep with |var|.
#define FOR_INPUTS2(ctype, itype, var, var2, test_vector)  \
  std::vector<ctype> var##_vec = test_vector();            \
  std::vector<ctype>::iterator var;                        \
  std::vector<ctype>::reverse_iterator var2;               \
  for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
       var != var##_vec.end(); ++var, ++var2)

// Typed convenience wrappers around FOR_INPUTS/FOR_INPUTS2.
#define FOR_ENUM_INPUTS(var, type, test_vector) \
  FOR_INPUTS(enum type, type, var, test_vector)
#define FOR_STRUCT_INPUTS(var, type, test_vector) \
  FOR_INPUTS(struct type, type, var, test_vector)
#define FOR_INT32_INPUTS(var, test_vector) \
  FOR_INPUTS(int32_t, int32, var, test_vector)
#define FOR_INT32_INPUTS2(var, var2, test_vector) \
  FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
#define FOR_INT64_INPUTS(var, test_vector) \
  FOR_INPUTS(int64_t, int64, var, test_vector)
#define FOR_UINT32_INPUTS(var, test_vector) \
  FOR_INPUTS(uint32_t, uint32, var, test_vector)
#define FOR_UINT64_INPUTS(var, test_vector) \
  FOR_INPUTS(uint64_t, uint64, var, test_vector)
Ben Murdochda12d292016-06-02 14:46:10 +0100674
// Assembles the FPU conversion sequence produced by
// GenerateConvertInstructionFunc, runs it with |x| as the first argument,
// and returns the 64-bit value the sequence leaves in FPU register f2
// (moved to v0 via dmfc1 below).
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
  typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assm(isolate, nullptr, 0,
                      v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assm;

  GenerateConvertInstructionFunc(masm);
  // Result contract: the generated sequence leaves its output in f2.
  __ dmfc1(v0, f2);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());

  return reinterpret_cast<RET_TYPE>(
      CALL_GENERATED_CODE(isolate, f, x, 0, 0, 0, 0));
}
700
// Round-trips uint32 inputs through Cvt_s_uw (uint32 -> float) and
// Trunc_uw_s (float -> uint32), comparing with the C++ float conversion.
TEST(Cvt_s_uw_Trunc_uw_s) {
  CcTest::InitializeVM();
  FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) {
    uint32_t input = *i;
    CHECK_EQ(static_cast<float>(input),
             run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
               __ Cvt_s_uw(f0, a0);
               // Clear the high half of f2 before the 32-bit truncate result
               // is read back as a 64-bit value.
               __ mthc1(zero_reg, f2);
               __ Trunc_uw_s(f2, f0, f1);
             }));
  }
}
713
// Round-trips uint64 inputs through Cvt_s_ul (uint64 -> float) and
// Trunc_ul_s (float -> uint64), comparing with the C++ float conversion.
TEST(Cvt_s_ul_Trunc_ul_s) {
  CcTest::InitializeVM();
  FOR_UINT64_INPUTS(i, cvt_trunc_uint64_test_values) {
    uint64_t input = *i;
    CHECK_EQ(static_cast<float>(input),
             run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
               __ Cvt_s_ul(f0, a0);
               __ Trunc_ul_s(f2, f0, f1, v0);
             }));
  }
}
725
// Round-trips uint64 inputs through Cvt_d_ul (uint64 -> double) and
// Trunc_ul_d (double -> uint64), comparing with the C++ double conversion.
TEST(Cvt_d_ul_Trunc_ul_d) {
  CcTest::InitializeVM();
  FOR_UINT64_INPUTS(i, cvt_trunc_uint64_test_values) {
    uint64_t input = *i;
    CHECK_EQ(static_cast<double>(input),
             run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
               __ Cvt_d_ul(f0, a0);
               __ Trunc_ul_d(f2, f0, f1, v0);
             }));
  }
}
737
// Round-trips int64 inputs through cvt_d_l (int64 -> double) and
// Trunc_l_d (double -> int64), comparing with the C++ double conversion.
TEST(cvt_d_l_Trunc_l_d) {
  CcTest::InitializeVM();
  FOR_INT64_INPUTS(i, cvt_trunc_int64_test_values) {
    int64_t input = *i;
    CHECK_EQ(static_cast<double>(input),
             run_Cvt<int64_t>(input, [](MacroAssembler* masm) {
               __ dmtc1(a0, f4);
               __ cvt_d_l(f0, f4);
               __ Trunc_l_d(f2, f0);
             }));
  }
}
750
751TEST(cvt_d_l_Trunc_l_ud) {
752 CcTest::InitializeVM();
Ben Murdochc5610432016-08-08 18:44:38 +0100753 FOR_INT64_INPUTS(i, cvt_trunc_int64_test_values) {
Ben Murdochda12d292016-06-02 14:46:10 +0100754 int64_t input = *i;
755 uint64_t abs_input = (input < 0) ? -input : input;
756 CHECK_EQ(static_cast<double>(abs_input),
757 run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
758 __ dmtc1(a0, f4);
759 __ cvt_d_l(f0, f4);
760 __ Trunc_l_ud(f2, f0, f6);
761 }));
762 }
763}
764
// Round-trips int32 inputs through cvt_d_w (int32 -> double) and
// Trunc_w_d (double -> int32), comparing with the C++ double conversion.
TEST(cvt_d_w_Trunc_w_d) {
  CcTest::InitializeVM();
  FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) {
    int32_t input = *i;
    CHECK_EQ(static_cast<double>(input),
             run_Cvt<int64_t>(input, [](MacroAssembler* masm) {
               __ mtc1(a0, f4);
               __ cvt_d_w(f0, f4);
               __ Trunc_w_d(f2, f0);
               // Sign-extend the 32-bit truncate result to 64 bits before
               // run_Cvt reads it back from f2 with dmfc1.
               __ mfc1(v1, f2);
               __ dmtc1(v1, f2);
             }));
  }
}
779
// int32 operand values for the overflow-branch tests, mixing positive,
// negative and boundary-ish bit patterns.
static const std::vector<int32_t> overflow_int32_test_values() {
  static const int32_t kValues[] = {
      static_cast<int32_t>(0xf0000000), static_cast<int32_t>(0x00000001),
      static_cast<int32_t>(0xff000000), static_cast<int32_t>(0x0000f000),
      static_cast<int32_t>(0x0f000000), static_cast<int32_t>(0x991234ab),
      static_cast<int32_t>(0xb0ffff01), static_cast<int32_t>(0x00006fff),
      static_cast<int32_t>(0xffffffff)};
  // std::begin/std::end instead of the arraysize() macro.
  return std::vector<int32_t>(std::begin(kValues), std::end(kValues));
}
789
// int64 operand values for the overflow-branch tests, mixing positive,
// negative and boundary-ish bit patterns.
static const std::vector<int64_t> overflow_int64_test_values() {
  static const int64_t kValues[] = {static_cast<int64_t>(0xf000000000000000),
                                    static_cast<int64_t>(0x0000000000000001),
                                    static_cast<int64_t>(0xff00000000000000),
                                    static_cast<int64_t>(0x0000f00111111110),
                                    static_cast<int64_t>(0x0f00001000000000),
                                    static_cast<int64_t>(0x991234ab12a96731),
                                    static_cast<int64_t>(0xb0ffff0f0f0f0f01),
                                    static_cast<int64_t>(0x00006fffffffffff),
                                    static_cast<int64_t>(0xffffffffffffffff)};
  // std::begin/std::end instead of the arraysize() macro.
  return std::vector<int64_t>(std::begin(kValues), std::end(kValues));
}
802
// Which overflow-branching macro-assembler operation a test case exercises:
// AddBranchOvf or SubBranchOvf.
enum OverflowBranchType {
  kAddBranchOverflow,
  kSubBranchOverflow,
};
807
// Register assignment fed to AddBranchOvf/SubBranchOvf: destination, the two
// operands and a scratch register. Combinations may alias dst with an
// operand or both operands with each other (see
// overflow_register_combination()).
struct OverflowRegisterCombination {
  Register dst;
  Register left;
  Register right;
  Register scratch;
};
814
815static const std::vector<enum OverflowBranchType> overflow_branch_type() {
816 static const enum OverflowBranchType kValues[] = {kAddBranchOverflow,
817 kSubBranchOverflow};
818 return std::vector<enum OverflowBranchType>(&kValues[0],
819 &kValues[arraysize(kValues)]);
820}
821
822static const std::vector<struct OverflowRegisterCombination>
823overflow_register_combination() {
824 static const struct OverflowRegisterCombination kValues[] = {
825 {t0, t1, t2, t3}, {t0, t0, t2, t3}, {t0, t1, t0, t3}, {t0, t1, t1, t3}};
826 return std::vector<struct OverflowRegisterCombination>(
827 &kValues[0], &kValues[arraysize(kValues)]);
828}
829
// Reference predicate: does x + y overflow integer type T? Evaluated
// without performing the (possibly undefined) addition itself.
template <typename T>
static bool IsAddOverflow(T x, T y) {
  // Compile-time check instead of the former runtime DCHECK: the property
  // is known statically, and this removes a project-macro dependence.
  static_assert(std::numeric_limits<T>::is_integer,
                "IsAddOverflow requires an integer type");
  T max = std::numeric_limits<T>::max();
  T min = std::numeric_limits<T>::min();

  // x + y overflows iff y exceeds the headroom remaining above/below x.
  return (x > 0 && y > (max - x)) || (x < 0 && y < (min - x));
}
838
// Reference predicate: does x - y overflow integer type T? Evaluated
// without performing the (possibly undefined) subtraction itself.
template <typename T>
static bool IsSubOverflow(T x, T y) {
  // Compile-time check instead of the former runtime DCHECK: the property
  // is known statically, and this removes a project-macro dependence.
  static_assert(std::numeric_limits<T>::is_integer,
                "IsSubOverflow requires an integer type");
  T max = std::numeric_limits<T>::max();
  T min = std::numeric_limits<T>::min();

  // x - y overflows iff x lies outside the range reachable after shifting
  // the bounds by y.
  return (y > 0 && x < (min + y)) || (y < 0 && x > (max + y));
}
847
// Assembles the overflow-branch sequence produced by
// GenerateOverflowInstructions, runs it through the simulator, and returns
// the value it leaves in v0 interpreted as bool (the generated sequences
// set v0 to 1 on the overflow path and 0 on the no-overflow path).
template <typename IN_TYPE, typename Func>
static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
                        Func GenerateOverflowInstructions) {
  typedef int64_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assm(isolate, nullptr, 0,
                      v8::internal::CodeObjectRequired::kYes);
  MacroAssembler* masm = &assm;

  // The operand values are baked into the generated code by the callback,
  // not passed at call time.
  GenerateOverflowInstructions(masm, valLeft, valRight);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());

  F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());

  int64_t r =
      reinterpret_cast<int64_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));

  DCHECK(r == 0 || r == 1);
  return r;
}
876
// Tests AddBranchOvf/SubBranchOvf with BOTH overflow and no-overflow labels
// supplied, over all value pairs, both branch flavours and all register
// combinations. Each case is run twice: once with a register right operand
// (res1) and once with an immediate Operand (res2). The generated stub
// returns 1 via the overflow path, 0 via the no-overflow path, and 2 only
// if neither branch is taken (which would fail the CHECKs).
TEST(BranchOverflowInt32BothLabels) {
  FOR_INT32_INPUTS(i, overflow_int32_test_values) {
    FOR_INT32_INPUTS(j, overflow_int32_test_values) {
      FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
        FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
                          overflow_register_combination) {
          int32_t ii = *i;
          int32_t jj = *j;
          enum OverflowBranchType branchType = *br;
          struct OverflowRegisterCombination rc = *regComb;

          // If left and right register are same then left and right
          // test values must also be same, otherwise we skip the test
          if (rc.left.code() == rc.right.code()) {
            if (ii != jj) {
              continue;
            }
          }

          // Variant 1: right operand in a register.
          bool res1 = runOverflow<int32_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
                                       int32_t valRight) {
                Label overflow, no_overflow, end;
                __ li(rc.left, valLeft);
                __ li(rc.right, valRight);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
                                    &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
                                    &no_overflow, rc.scratch);
                    break;
                }
                __ li(v0, 2);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Variant 2: right operand as an immediate Operand.
          bool res2 = runOverflow<int32_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
                                       int32_t valRight) {
                Label overflow, no_overflow, end;
                __ li(rc.left, valLeft);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
                                    &overflow, &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
                                    &overflow, &no_overflow, rc.scratch);
                    break;
                }
                __ li(v0, 2);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Both variants must agree with the portable reference predicate.
          switch (branchType) {
            case kAddBranchOverflow:
              CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
              CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
              break;
            case kSubBranchOverflow:
              CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
              CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
              break;
            default:
              UNREACHABLE();
          }
        }
      }
    }
  }
}
964
// Tests AddBranchOvf/SubBranchOvf (32-bit) when only the overflow label is
// supplied (no-overflow label is NULL): execution falls through on no
// overflow (stub returns 0) and branches on overflow (stub returns 1).
TEST(BranchOverflowInt32LeftLabel) {
  FOR_INT32_INPUTS(i, overflow_int32_test_values) {
    FOR_INT32_INPUTS(j, overflow_int32_test_values) {
      FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
        FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
                          overflow_register_combination) {
          int32_t ii = *i;
          int32_t jj = *j;
          enum OverflowBranchType branchType = *br;
          struct OverflowRegisterCombination rc = *regComb;

          // If left and right register are same then left and right
          // test values must also be same, otherwise we skip the test
          if (rc.left.code() == rc.right.code()) {
            if (ii != jj) {
              continue;
            }
          }

          // Variant 1: both operands live in registers.
          bool res1 = runOverflow<int32_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
                                       int32_t valRight) {
                Label overflow, end;
                __ li(rc.left, valLeft);
                __ li(rc.right, valRight);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
                                    rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
                                    rc.scratch);
                    break;
                }
                // Fall-through means no overflow.
                __ li(v0, 0);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ bind(&end);
              });

          // Variant 2: the right operand is an immediate.
          bool res2 = runOverflow<int32_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
                                       int32_t valRight) {
                Label overflow, end;
                __ li(rc.left, valLeft);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
                                    &overflow, NULL, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
                                    &overflow, NULL, rc.scratch);
                    break;
                }
                __ li(v0, 0);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ bind(&end);
              });

          // Both variants must agree with the portable overflow predicates.
          switch (branchType) {
            case kAddBranchOverflow:
              CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
              CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
              break;
            case kSubBranchOverflow:
              CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
              CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
              break;
            default:
              UNREACHABLE();
          }
        }
      }
    }
  }
}
1046
// Tests AddBranchOvf/SubBranchOvf (32-bit) when only the no-overflow label
// is supplied (overflow label is NULL): execution falls through on overflow
// (stub returns 1) and branches on no overflow (stub returns 0).
TEST(BranchOverflowInt32RightLabel) {
  FOR_INT32_INPUTS(i, overflow_int32_test_values) {
    FOR_INT32_INPUTS(j, overflow_int32_test_values) {
      FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
        FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
                          overflow_register_combination) {
          int32_t ii = *i;
          int32_t jj = *j;
          enum OverflowBranchType branchType = *br;
          struct OverflowRegisterCombination rc = *regComb;

          // If left and right register are same then left and right
          // test values must also be same, otherwise we skip the test
          if (rc.left.code() == rc.right.code()) {
            if (ii != jj) {
              continue;
            }
          }

          // Variant 1: both operands live in registers.
          bool res1 = runOverflow<int32_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
                                       int32_t valRight) {
                Label no_overflow, end;
                __ li(rc.left, valLeft);
                __ li(rc.right, valRight);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ AddBranchOvf(rc.dst, rc.left, rc.right, NULL,
                                    &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ SubBranchOvf(rc.dst, rc.left, rc.right, NULL,
                                    &no_overflow, rc.scratch);
                    break;
                }
                // Fall-through means overflow.
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Variant 2: the right operand is an immediate.
          bool res2 = runOverflow<int32_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
                                       int32_t valRight) {
                Label no_overflow, end;
                __ li(rc.left, valLeft);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ AddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
                                    &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ SubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
                                    &no_overflow, rc.scratch);
                    break;
                }
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Both variants must agree with the portable overflow predicates.
          switch (branchType) {
            case kAddBranchOverflow:
              CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
              CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
              break;
            case kSubBranchOverflow:
              CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
              CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
              break;
            default:
              UNREACHABLE();
          }
        }
      }
    }
  }
}
1128
// 64-bit counterpart of BranchOverflowInt32BothLabels: exercises
// DaddBranchOvf/DsubBranchOvf with both labels supplied. The stub returns
// 1 when the overflow branch is taken, 0 for the no-overflow branch, and 2
// if neither branch fires (a macro-assembler bug).
TEST(BranchOverflowInt64BothLabels) {
  FOR_INT64_INPUTS(i, overflow_int64_test_values) {
    FOR_INT64_INPUTS(j, overflow_int64_test_values) {
      FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
        FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
                          overflow_register_combination) {
          int64_t ii = *i;
          int64_t jj = *j;
          enum OverflowBranchType branchType = *br;
          struct OverflowRegisterCombination rc = *regComb;

          // If left and right register are same then left and right
          // test values must also be same, otherwise we skip the test
          if (rc.left.code() == rc.right.code()) {
            if (ii != jj) {
              continue;
            }
          }

          // Variant 1: both operands live in registers.
          bool res1 = runOverflow<int64_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
                                       int64_t valRight) {
                Label overflow, no_overflow, end;
                __ li(rc.left, valLeft);
                __ li(rc.right, valRight);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ DaddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
                                     &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ DsubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
                                     &no_overflow, rc.scratch);
                    break;
                }
                // 2 signals that neither label was branched to.
                __ li(v0, 2);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Variant 2: the right operand is an immediate.
          bool res2 = runOverflow<int64_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
                                       int64_t valRight) {
                Label overflow, no_overflow, end;
                __ li(rc.left, valLeft);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight),
                                     &overflow, &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight),
                                     &overflow, &no_overflow, rc.scratch);
                    break;
                }
                __ li(v0, 2);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Both variants must agree with the portable overflow predicates.
          switch (branchType) {
            case kAddBranchOverflow:
              CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res1);
              CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res2);
              break;
            case kSubBranchOverflow:
              CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res1);
              CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res2);
              break;
            default:
              UNREACHABLE();
          }
        }
      }
    }
  }
}
1216
// 64-bit counterpart of BranchOverflowInt32LeftLabel: only the overflow
// label is supplied (no-overflow label is NULL). Fall-through means no
// overflow (stub returns 0); the overflow branch returns 1.
TEST(BranchOverflowInt64LeftLabel) {
  FOR_INT64_INPUTS(i, overflow_int64_test_values) {
    FOR_INT64_INPUTS(j, overflow_int64_test_values) {
      FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
        FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
                          overflow_register_combination) {
          int64_t ii = *i;
          int64_t jj = *j;
          enum OverflowBranchType branchType = *br;
          struct OverflowRegisterCombination rc = *regComb;

          // If left and right register are same then left and right
          // test values must also be same, otherwise we skip the test
          if (rc.left.code() == rc.right.code()) {
            if (ii != jj) {
              continue;
            }
          }

          // Variant 1: both operands live in registers.
          bool res1 = runOverflow<int64_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
                                       int64_t valRight) {
                Label overflow, end;
                __ li(rc.left, valLeft);
                __ li(rc.right, valRight);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ DaddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
                                     rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ DsubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
                                     rc.scratch);
                    break;
                }
                // Fall-through means no overflow.
                __ li(v0, 0);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ bind(&end);
              });

          // Variant 2: the right operand is an immediate.
          bool res2 = runOverflow<int64_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
                                       int64_t valRight) {
                Label overflow, end;
                __ li(rc.left, valLeft);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight),
                                     &overflow, NULL, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight),
                                     &overflow, NULL, rc.scratch);
                    break;
                }
                __ li(v0, 0);
                __ Branch(&end);
                __ bind(&overflow);
                __ li(v0, 1);
                __ bind(&end);
              });

          // Both variants must agree with the portable overflow predicates.
          switch (branchType) {
            case kAddBranchOverflow:
              CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res1);
              CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res2);
              break;
            case kSubBranchOverflow:
              CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res1);
              CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res2);
              break;
            default:
              UNREACHABLE();
          }
        }
      }
    }
  }
}
1298
// 64-bit counterpart of BranchOverflowInt32RightLabel: only the no-overflow
// label is supplied (overflow label is NULL). Fall-through means overflow
// (stub returns 1); the no-overflow branch returns 0.
TEST(BranchOverflowInt64RightLabel) {
  FOR_INT64_INPUTS(i, overflow_int64_test_values) {
    FOR_INT64_INPUTS(j, overflow_int64_test_values) {
      FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
        FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
                          overflow_register_combination) {
          int64_t ii = *i;
          int64_t jj = *j;
          enum OverflowBranchType branchType = *br;
          struct OverflowRegisterCombination rc = *regComb;

          // If left and right register are same then left and right
          // test values must also be same, otherwise we skip the test
          if (rc.left.code() == rc.right.code()) {
            if (ii != jj) {
              continue;
            }
          }

          // Variant 1: both operands live in registers.
          bool res1 = runOverflow<int64_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
                                       int64_t valRight) {
                Label no_overflow, end;
                __ li(rc.left, valLeft);
                __ li(rc.right, valRight);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ DaddBranchOvf(rc.dst, rc.left, rc.right, NULL,
                                     &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ DsubBranchOvf(rc.dst, rc.left, rc.right, NULL,
                                     &no_overflow, rc.scratch);
                    break;
                }
                // Fall-through means overflow.
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Variant 2: the right operand is an immediate.
          bool res2 = runOverflow<int64_t>(
              ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
                                       int64_t valRight) {
                Label no_overflow, end;
                __ li(rc.left, valLeft);
                switch (branchType) {
                  case kAddBranchOverflow:
                    __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
                                     &no_overflow, rc.scratch);
                    break;
                  case kSubBranchOverflow:
                    __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
                                     &no_overflow, rc.scratch);
                    break;
                }
                __ li(v0, 1);
                __ Branch(&end);
                __ bind(&no_overflow);
                __ li(v0, 0);
                __ bind(&end);
              });

          // Both variants must agree with the portable overflow predicates.
          switch (branchType) {
            case kAddBranchOverflow:
              CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res1);
              CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res2);
              break;
            case kSubBranchOverflow:
              CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res1);
              CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res2);
              break;
            default:
              UNREACHABLE();
          }
        }
      }
    }
  }
}
1380
Ben Murdochda12d292016-06-02 14:46:10 +01001381TEST(min_max_nan) {
1382 CcTest::InitializeVM();
1383 Isolate* isolate = CcTest::i_isolate();
1384 HandleScope scope(isolate);
1385 MacroAssembler assembler(isolate, nullptr, 0,
1386 v8::internal::CodeObjectRequired::kYes);
1387 MacroAssembler* masm = &assembler;
1388
1389 struct TestFloat {
1390 double a;
1391 double b;
1392 double c;
1393 double d;
1394 float e;
1395 float f;
1396 float g;
1397 float h;
1398 };
1399
1400 TestFloat test;
1401 const double dnan = std::numeric_limits<double>::quiet_NaN();
1402 const double dinf = std::numeric_limits<double>::infinity();
1403 const double dminf = -std::numeric_limits<double>::infinity();
1404 const float fnan = std::numeric_limits<float>::quiet_NaN();
1405 const float finf = std::numeric_limits<float>::infinity();
1406 const float fminf = std::numeric_limits<float>::infinity();
1407 const int kTableLength = 13;
1408
1409 double inputsa[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, dinf, dminf,
1410 dinf, dnan, 3.0, dinf, dnan, dnan};
1411 double inputsb[kTableLength] = {3.0, 2.0, 0.0, -0.0, dinf, 42.0, dinf,
1412 dminf, 3.0, dnan, dnan, dinf, dnan};
1413 double outputsdmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0,
1414 42.0, dminf, dminf, dnan, dnan,
1415 dnan, dnan, dnan};
1416 double outputsdmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf,
1417 dinf, dnan, dnan, dnan, dnan, dnan};
1418
1419 float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf,
1420 finf, fnan, 3.0, finf, fnan, fnan};
1421 float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf,
1422 fminf, 3.0, fnan, fnan, finf, fnan};
1423 float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf,
1424 fminf, fnan, fnan, fnan, fnan, fnan};
1425 float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf,
1426 finf, fnan, fnan, fnan, fnan, fnan};
1427
1428 auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
1429 __ bind(nan);
1430 __ LoadRoot(at, Heap::kNanValueRootIndex);
1431 __ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
1432 __ Branch(back);
1433 };
1434
1435 auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) {
1436 __ bind(nan);
1437 __ Move(dst, fnan);
1438 __ Branch(back);
1439 };
1440
1441 Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan;
1442 Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan;
1443
1444 __ push(s6);
1445 __ InitializeRootRegister();
1446 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
1447 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
1448 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
1449 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
1450 __ MinNaNCheck_d(f10, f4, f8, &handle_mind_nan);
1451 __ bind(&back_mind_nan);
1452 __ MaxNaNCheck_d(f12, f4, f8, &handle_maxd_nan);
1453 __ bind(&back_maxd_nan);
1454 __ MinNaNCheck_s(f14, f2, f6, &handle_mins_nan);
1455 __ bind(&back_mins_nan);
1456 __ MaxNaNCheck_s(f16, f2, f6, &handle_maxs_nan);
1457 __ bind(&back_maxs_nan);
1458 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
1459 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
1460 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
1461 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
1462 __ pop(s6);
1463 __ jr(ra);
1464 __ nop();
1465
1466 handle_dnan(f10, &handle_mind_nan, &back_mind_nan);
1467 handle_dnan(f12, &handle_maxd_nan, &back_maxd_nan);
1468 handle_snan(f14, &handle_mins_nan, &back_mins_nan);
1469 handle_snan(f16, &handle_maxs_nan, &back_maxs_nan);
1470
1471 CodeDesc desc;
1472 masm->GetCode(&desc);
1473 Handle<Code> code = isolate->factory()->NewCode(
1474 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1475 ::F3 f = FUNCTION_CAST<::F3>(code->entry());
1476 for (int i = 0; i < kTableLength; i++) {
1477 test.a = inputsa[i];
1478 test.b = inputsb[i];
1479 test.e = inputse[i];
1480 test.f = inputsf[i];
1481
1482 CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
1483
1484 CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
1485 CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
1486 CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
1487 CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
1488 }
1489}
1490
Ben Murdochc5610432016-08-08 18:44:38 +01001491template <typename IN_TYPE, typename Func>
1492bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
1493 IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
1494 typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
1495
1496 Isolate* isolate = CcTest::i_isolate();
1497 HandleScope scope(isolate);
1498 MacroAssembler assm(isolate, nullptr, 0,
1499 v8::internal::CodeObjectRequired::kYes);
1500 MacroAssembler* masm = &assm;
1501 IN_TYPE res;
1502
1503 GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
1504 __ jr(ra);
1505 __ nop();
1506
1507 CodeDesc desc;
1508 assm.GetCode(&desc);
1509 Handle<Code> code = isolate->factory()->NewCode(
1510 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1511
1512 F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
1513
1514 MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
1515 CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
1516 MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
1517
1518 return res == value;
1519}
1520
// Source values for the unaligned-access tests: arbitrary bit patterns plus
// boundary values (sign bits of each sub-word, 1, all-ones).
static const std::vector<uint64_t> unsigned_test_values() {
  return std::vector<uint64_t>{0x2180f18a06384414, 0x000a714532102277,
                               0xbc1acccf180649f0, 0x8000000080008000,
                               0x0000000000000001, 0xffffffffffffffff};
}
1528
1529static const std::vector<int32_t> unsigned_test_offset() {
1530 static const int32_t kValues[] = {// value, offset
1531 -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
1532 return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
1533}
1534
// Small deltas (-5..5) added to the base offsets so that every misalignment
// within a doubleword is covered.
static const std::vector<int32_t> unsigned_test_offset_increment() {
  std::vector<int32_t> increments;
  for (int32_t delta = -5; delta <= 5; ++delta) {
    increments.push_back(delta);
  }
  return increments;
}
1539
// Round-trips 16-bit values through the unaligned halfword load/store macro
// instructions (Ulh/Ulhu and Ush) at many unaligned offsets, covering the
// cases where the destination register aliases the address register and
// where different scratch registers are used by the store.
TEST(Ulh) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        // Plain load into a register distinct from the address register.
        CHECK_EQ(true, run_Unaligned<uint16_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Ulh(v0, MemOperand(a0, in_offset));
                             __ Ush(v0, MemOperand(a0, out_offset), v0);
                           }));
        // Destination aliases the address register (a0).
        CHECK_EQ(true, run_Unaligned<uint16_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ mov(t0, a0);
                             __ Ulh(a0, MemOperand(a0, in_offset));
                             __ Ush(a0, MemOperand(t0, out_offset), v0);
                           }));
        // Unsigned (Ulhu) variant with aliased destination.
        CHECK_EQ(true, run_Unaligned<uint16_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ mov(t0, a0);
                             __ Ulhu(a0, MemOperand(a0, in_offset));
                             __ Ush(a0, MemOperand(t0, out_offset), t1);
                           }));
        // Unsigned (Ulhu) variant into a separate register.
        CHECK_EQ(true, run_Unaligned<uint16_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Ulhu(v0, MemOperand(a0, in_offset));
                             __ Ush(v0, MemOperand(a0, out_offset), t1);
                           }));
      }
    }
  }
}
1588
// Verifies that Ulh and Ulhu extend the loaded halfword consistently: either
// both agree (upper bits must then be zero) or they differ, in which case
// the bits above bit 15 must be complementary (sign- vs zero-extension).
// On success the loaded value is stored back for run_Unaligned to compare;
// on failure zero is stored, which makes the round-trip check fail.
TEST(Ulh_bitextension) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<uint16_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             Label success, fail, end, different;
                             __ Ulh(t0, MemOperand(a0, in_offset));
                             __ Ulhu(t1, MemOperand(a0, in_offset));
                             __ Branch(&different, ne, t0, Operand(t1));

                             // If signed and unsigned values are same, check
                             // the upper bits to see if they are zero
                             __ sra(t0, t0, 15);
                             __ Branch(&success, eq, t0, Operand(zero_reg));
                             __ Branch(&fail);

                             // If signed and unsigned values are different,
                             // check that the upper bits are complementary
                             __ bind(&different);
                             __ sra(t1, t1, 15);
                             __ Branch(&fail, ne, t1, Operand(1));
                             __ sra(t0, t0, 15);
                             __ addiu(t0, t0, 1);
                             __ Branch(&fail, ne, t0, Operand(zero_reg));
                             // Fall through to success

                             __ bind(&success);
                             __ Ulh(t0, MemOperand(a0, in_offset));
                             __ Ush(t0, MemOperand(a0, out_offset), v0);
                             __ Branch(&end);
                             __ bind(&fail);
                             __ Ush(zero_reg, MemOperand(a0, out_offset), v0);
                             __ bind(&end);
                           }));
      }
    }
  }
}
1640
// Round-trips 32-bit values through the unaligned word load/store macro
// instructions (Ulw/Ulwu and Usw) at many unaligned offsets, covering both
// a separate destination register and one that aliases the address register.
TEST(Ulw) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        // Plain load into a register distinct from the address register.
        CHECK_EQ(true, run_Unaligned<uint32_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Ulw(v0, MemOperand(a0, in_offset));
                             __ Usw(v0, MemOperand(a0, out_offset));
                           }));
        // Destination aliases the address register (a0).
        CHECK_EQ(true,
                 run_Unaligned<uint32_t>(
                     buffer_middle, in_offset, out_offset, (uint32_t)value,
                     [](MacroAssembler* masm, int32_t in_offset,
                        int32_t out_offset) {
                       __ mov(t0, a0);
                       __ Ulw(a0, MemOperand(a0, in_offset));
                       __ Usw(a0, MemOperand(t0, out_offset));
                     }));
        // Unsigned (Ulwu) variant, separate destination register.
        CHECK_EQ(true, run_Unaligned<uint32_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Ulwu(v0, MemOperand(a0, in_offset));
                             __ Usw(v0, MemOperand(a0, out_offset));
                           }));
        // Unsigned (Ulwu) variant with aliased destination.
        CHECK_EQ(true,
                 run_Unaligned<uint32_t>(
                     buffer_middle, in_offset, out_offset, (uint32_t)value,
                     [](MacroAssembler* masm, int32_t in_offset,
                        int32_t out_offset) {
                       __ mov(t0, a0);
                       __ Ulwu(a0, MemOperand(a0, in_offset));
                       __ Usw(a0, MemOperand(t0, out_offset));
                     }));
      }
    }
  }
}
1691
// Verifies that Ulw and Ulwu extend the loaded word consistently: either
// both agree (bits above bit 31 must then be zero) or they differ, in which
// case those bits must be complementary (sign- vs zero-extension). On
// success the loaded value is stored back for run_Unaligned to compare; on
// failure zero is stored, which makes the round-trip check fail.
TEST(Ulw_extension) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<uint32_t>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             Label success, fail, end, different;
                             __ Ulw(t0, MemOperand(a0, in_offset));
                             __ Ulwu(t1, MemOperand(a0, in_offset));
                             __ Branch(&different, ne, t0, Operand(t1));

                             // If signed and unsigned values are same, check
                             // the upper bits to see if they are zero
                             __ dsra(t0, t0, 31);
                             __ Branch(&success, eq, t0, Operand(zero_reg));
                             __ Branch(&fail);

                             // If signed and unsigned values are different,
                             // check that the upper bits are complementary
                             __ bind(&different);
                             __ dsra(t1, t1, 31);
                             __ Branch(&fail, ne, t1, Operand(1));
                             __ dsra(t0, t0, 31);
                             __ daddiu(t0, t0, 1);
                             __ Branch(&fail, ne, t0, Operand(zero_reg));
                             // Fall through to success

                             __ bind(&success);
                             __ Ulw(t0, MemOperand(a0, in_offset));
                             __ Usw(t0, MemOperand(a0, out_offset));
                             __ Branch(&end);
                             __ bind(&fail);
                             __ Usw(zero_reg, MemOperand(a0, out_offset));
                             __ bind(&end);
                           }));
      }
    }
  }
}
1743
1744TEST(Uld) {
1745 CcTest::InitializeVM();
1746
1747 static const int kBufferSize = 300 * KB;
1748 char memory_buffer[kBufferSize];
1749 char* buffer_middle = memory_buffer + (kBufferSize / 2);
1750
1751 FOR_UINT64_INPUTS(i, unsigned_test_values) {
1752 FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
1753 FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
1754 uint64_t value = *i;
1755 int32_t in_offset = *j1 + *k1;
1756 int32_t out_offset = *j2 + *k2;
1757
1758 CHECK_EQ(true, run_Unaligned<uint64_t>(
1759 buffer_middle, in_offset, out_offset, value,
1760 [](MacroAssembler* masm, int32_t in_offset,
1761 int32_t out_offset) {
1762 __ Uld(v0, MemOperand(a0, in_offset));
1763 __ Usd(v0, MemOperand(a0, out_offset));
1764 }));
1765 CHECK_EQ(true,
1766 run_Unaligned<uint64_t>(
1767 buffer_middle, in_offset, out_offset, (uint32_t)value,
1768 [](MacroAssembler* masm, int32_t in_offset,
1769 int32_t out_offset) {
1770 __ mov(t0, a0);
1771 __ Uld(a0, MemOperand(a0, in_offset));
1772 __ Usd(a0, MemOperand(t0, out_offset));
1773 }));
1774 }
1775 }
1776 }
1777}
1778
// Round-trips single-precision values through the unaligned FPU word
// load/store macro instructions (Ulwc1/Uswc1), using t0 as the scratch
// register.
TEST(Ulwc1) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        float value = static_cast<float>(*i & 0xFFFFFFFF);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<float>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Ulwc1(f0, MemOperand(a0, in_offset), t0);
                             __ Uswc1(f0, MemOperand(a0, out_offset), t0);
                           }));
      }
    }
  }
}
1804
// Round-trips double-precision values through the unaligned FPU doubleword
// load/store macro instructions (Uldc1/Usdc1), using t0 as the scratch
// register.
TEST(Uldc1) {
  CcTest::InitializeVM();

  static const int kBufferSize = 300 * KB;
  char memory_buffer[kBufferSize];
  char* buffer_middle = memory_buffer + (kBufferSize / 2);

  FOR_UINT64_INPUTS(i, unsigned_test_values) {
    FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
      FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
        double value = static_cast<double>(*i);
        int32_t in_offset = *j1 + *k1;
        int32_t out_offset = *j2 + *k2;

        CHECK_EQ(true, run_Unaligned<double>(
                           buffer_middle, in_offset, out_offset, value,
                           [](MacroAssembler* masm, int32_t in_offset,
                              int32_t out_offset) {
                             __ Uldc1(f0, MemOperand(a0, in_offset), t0);
                             __ Usdc1(f0, MemOperand(a0, out_offset), t0);
                           }));
      }
    }
  }
}
1830
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001831#undef __