blob: 773862710d5446d64fe43e75ef79383d919b277a [file] [log] [blame]
Dave Allison65fcc2c2014-04-28 13:45:27 -07001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Nicolas Geoffray96f89a22014-07-11 10:57:49 +010017#include <dirent.h>
Andreas Gampefd114702015-05-13 17:00:41 -070018#include <errno.h>
Dave Allison65fcc2c2014-04-28 13:45:27 -070019#include <fstream>
Nicolas Geoffray96f89a22014-07-11 10:57:49 +010020#include <map>
Andreas Gampefd114702015-05-13 17:00:41 -070021#include <string.h>
22#include <sys/types.h>
Dave Allison65fcc2c2014-04-28 13:45:27 -070023
24#include "gtest/gtest.h"
25#include "utils/arm/assembler_thumb2.h"
26#include "base/hex_dump.h"
27#include "common_runtime_test.h"
28
29namespace art {
30namespace arm {
31
32// Include results file (generated manually)
33#include "assembler_thumb_test_expected.cc.inc"
34
Dave Allisond20ddb22014-06-05 14:16:30 -070035#ifndef HAVE_ANDROID_OS
Dave Allison45fdb932014-06-25 12:37:10 -070036// This controls whether the results are printed to the
37// screen or compared against the expected output.
38// To generate new expected output, set this to true and
39// copy the output into the .cc.inc file in the form
40// of the other results.
41//
42// When this is false, the results are not printed to the
43// output, but are compared against the expected results
44// in the .cc.inc file.
Dave Allison65fcc2c2014-04-28 13:45:27 -070045static constexpr bool kPrintResults = false;
Dave Allisond20ddb22014-06-05 14:16:30 -070046#endif
Dave Allison65fcc2c2014-04-28 13:45:27 -070047
// Ensure ANDROID_DATA points somewhere usable.  Host-side test runs may not
// have it set, so fall back to /tmp without clobbering an existing value.
void SetAndroidData() {
  if (getenv("ANDROID_DATA") == nullptr) {
    setenv("ANDROID_DATA", "/tmp", 1);
  }
}
54
// Compares two C strings while ignoring runs of whitespace on either side.
// Returns 0 when the strings are equal modulo whitespace, otherwise the
// difference of the first mismatching characters (strcmp-style sign).
int CompareIgnoringSpace(const char* s1, const char* s2) {
  while (*s1 != '\0') {
    // Cast to unsigned char: passing a negative plain char to isspace() is UB.
    while (isspace(static_cast<unsigned char>(*s1))) ++s1;
    while (isspace(static_cast<unsigned char>(*s2))) ++s2;
    if (*s1 == '\0' || *s1 != *s2) {
      break;
    }
    ++s1;
    ++s2;
  }
  // Also skip trailing whitespace before the final comparison.  Previously a
  // trailing newline present on only one side (e.g. the fgets() line read in
  // dump()) made otherwise-equal strings compare unequal.
  while (isspace(static_cast<unsigned char>(*s1))) ++s1;
  while (isspace(static_cast<unsigned char>(*s2))) ++s2;
  return *s1 - *s2;
}
67
Dave Allison65fcc2c2014-04-28 13:45:27 -070068void dump(std::vector<uint8_t>& code, const char* testname) {
69 // This will only work on the host. There is no as, objcopy or objdump on the
70 // device.
71#ifndef HAVE_ANDROID_OS
72 static bool results_ok = false;
73 static std::string toolsdir;
74
75 if (!results_ok) {
76 setup_results();
David Srbecky3e52aa42015-04-12 07:45:18 +010077 toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
Dave Allison65fcc2c2014-04-28 13:45:27 -070078 SetAndroidData();
79 results_ok = true;
80 }
81
82 ScratchFile file;
83
84 const char* filename = file.GetFilename().c_str();
85
86 std::ofstream out(filename);
87 if (out) {
88 out << ".section \".text\"\n";
89 out << ".syntax unified\n";
90 out << ".arch armv7-a\n";
91 out << ".thumb\n";
92 out << ".thumb_func\n";
93 out << ".type " << testname << ", #function\n";
94 out << ".global " << testname << "\n";
95 out << testname << ":\n";
96 out << ".fnstart\n";
97
98 for (uint32_t i = 0 ; i < code.size(); ++i) {
99 out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
100 }
101 out << ".fnend\n";
102 out << ".size " << testname << ", .-" << testname << "\n";
103 }
104 out.close();
105
Andreas Gampe4470c1d2014-07-21 18:32:59 -0700106 char cmd[1024];
Dave Allison65fcc2c2014-04-28 13:45:27 -0700107
108 // Assemble the .S
David Srbecky3e52aa42015-04-12 07:45:18 +0100109 snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
Andreas Gampefd114702015-05-13 17:00:41 -0700110 int cmd_result = system(cmd);
111 ASSERT_EQ(cmd_result, 0) << strerror(errno);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700112
113 // Remove the $d symbols to prevent the disassembler dumping the instructions
114 // as .word
David Srbecky3e52aa42015-04-12 07:45:18 +0100115 snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
Andreas Gampefd114702015-05-13 17:00:41 -0700116 int cmd_result2 = system(cmd);
117 ASSERT_EQ(cmd_result2, 0) << strerror(errno);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700118
119 // Disassemble.
120
David Srbecky3e52aa42015-04-12 07:45:18 +0100121 snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
122 toolsdir.c_str(), filename);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700123 if (kPrintResults) {
124 // Print the results only, don't check. This is used to generate new output for inserting
125 // into the .inc file.
Andreas Gampefd114702015-05-13 17:00:41 -0700126 int cmd_result3 = system(cmd);
127 ASSERT_EQ(cmd_result3, 0) << strerror(errno);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700128 } else {
129 // Check the results match the appropriate results in the .inc file.
130 FILE *fp = popen(cmd, "r");
131 ASSERT_TRUE(fp != nullptr);
132
133 std::map<std::string, const char**>::iterator results = test_results.find(testname);
134 ASSERT_NE(results, test_results.end());
135
136 uint32_t lineindex = 0;
137
138 while (!feof(fp)) {
139 char testline[256];
140 char *s = fgets(testline, sizeof(testline), fp);
141 if (s == nullptr) {
142 break;
143 }
Dave Allison45fdb932014-06-25 12:37:10 -0700144 if (CompareIgnoringSpace(results->second[lineindex], testline) != 0) {
145 LOG(FATAL) << "Output is not as expected at line: " << lineindex
146 << results->second[lineindex] << "/" << testline;
147 }
Dave Allison65fcc2c2014-04-28 13:45:27 -0700148 ++lineindex;
149 }
150 // Check that we are at the end.
151 ASSERT_TRUE(results->second[lineindex] == nullptr);
152 fclose(fp);
153 }
154
155 char buf[FILENAME_MAX];
156 snprintf(buf, sizeof(buf), "%s.o", filename);
157 unlink(buf);
158
159 snprintf(buf, sizeof(buf), "%s.oo", filename);
160 unlink(buf);
161#endif
162}
163
164#define __ assembler->
165
166TEST(Thumb2AssemblerTest, SimpleMov) {
167 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
168
169 __ mov(R0, ShifterOperand(R1));
170 __ mov(R8, ShifterOperand(R9));
171
172 __ mov(R0, ShifterOperand(1));
173 __ mov(R8, ShifterOperand(9));
174
175 size_t cs = __ CodeSize();
176 std::vector<uint8_t> managed_code(cs);
177 MemoryRegion code(&managed_code[0], managed_code.size());
178 __ FinalizeInstructions(code);
179 dump(managed_code, "SimpleMov");
180 delete assembler;
181}
182
183TEST(Thumb2AssemblerTest, SimpleMov32) {
184 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
185 assembler->Force32Bit();
186
187 __ mov(R0, ShifterOperand(R1));
188 __ mov(R8, ShifterOperand(R9));
189
190 size_t cs = __ CodeSize();
191 std::vector<uint8_t> managed_code(cs);
192 MemoryRegion code(&managed_code[0], managed_code.size());
193 __ FinalizeInstructions(code);
194 dump(managed_code, "SimpleMov32");
195 delete assembler;
196}
197
198TEST(Thumb2AssemblerTest, SimpleMovAdd) {
199 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
200
201 __ mov(R0, ShifterOperand(R1));
202 __ add(R0, R1, ShifterOperand(R2));
203 __ add(R0, R1, ShifterOperand());
204
205 size_t cs = __ CodeSize();
206 std::vector<uint8_t> managed_code(cs);
207 MemoryRegion code(&managed_code[0], managed_code.size());
208 __ FinalizeInstructions(code);
209 dump(managed_code, "SimpleMovAdd");
210 delete assembler;
211}
212
213TEST(Thumb2AssemblerTest, DataProcessingRegister) {
214 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
215
216 __ mov(R0, ShifterOperand(R1));
217 __ mvn(R0, ShifterOperand(R1));
218
219 // 32 bit variants.
220 __ add(R0, R1, ShifterOperand(R2));
221 __ sub(R0, R1, ShifterOperand(R2));
222 __ and_(R0, R1, ShifterOperand(R2));
223 __ orr(R0, R1, ShifterOperand(R2));
224 __ eor(R0, R1, ShifterOperand(R2));
225 __ bic(R0, R1, ShifterOperand(R2));
226 __ adc(R0, R1, ShifterOperand(R2));
227 __ sbc(R0, R1, ShifterOperand(R2));
228 __ rsb(R0, R1, ShifterOperand(R2));
229
230 // 16 bit variants.
231 __ add(R0, R1, ShifterOperand());
232 __ sub(R0, R1, ShifterOperand());
Andreas Gampe7b7e5242015-02-02 19:17:11 -0800233 __ and_(R0, R0, ShifterOperand(R1));
234 __ orr(R0, R0, ShifterOperand(R1));
235 __ eor(R0, R0, ShifterOperand(R1));
236 __ bic(R0, R0, ShifterOperand(R1));
237 __ adc(R0, R0, ShifterOperand(R1));
238 __ sbc(R0, R0, ShifterOperand(R1));
239 __ rsb(R0, R0, ShifterOperand(R1));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700240
241 __ tst(R0, ShifterOperand(R1));
242 __ teq(R0, ShifterOperand(R1));
243 __ cmp(R0, ShifterOperand(R1));
244 __ cmn(R0, ShifterOperand(R1));
245
246 __ movs(R0, ShifterOperand(R1));
247 __ mvns(R0, ShifterOperand(R1));
248
Nicolas Geoffray3c7bb982014-07-23 16:04:16 +0100249 // 32 bit variants.
250 __ add(R12, R1, ShifterOperand(R0));
251
Dave Allison65fcc2c2014-04-28 13:45:27 -0700252 size_t cs = __ CodeSize();
253 std::vector<uint8_t> managed_code(cs);
254 MemoryRegion code(&managed_code[0], managed_code.size());
255 __ FinalizeInstructions(code);
256 dump(managed_code, "DataProcessingRegister");
257 delete assembler;
258}
259
260TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
261 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
262
263 __ mov(R0, ShifterOperand(0x55));
264 __ mvn(R0, ShifterOperand(0x55));
265 __ add(R0, R1, ShifterOperand(0x55));
266 __ sub(R0, R1, ShifterOperand(0x55));
267 __ and_(R0, R1, ShifterOperand(0x55));
268 __ orr(R0, R1, ShifterOperand(0x55));
269 __ eor(R0, R1, ShifterOperand(0x55));
270 __ bic(R0, R1, ShifterOperand(0x55));
271 __ adc(R0, R1, ShifterOperand(0x55));
272 __ sbc(R0, R1, ShifterOperand(0x55));
273 __ rsb(R0, R1, ShifterOperand(0x55));
274
275 __ tst(R0, ShifterOperand(0x55));
276 __ teq(R0, ShifterOperand(0x55));
277 __ cmp(R0, ShifterOperand(0x55));
278 __ cmn(R0, ShifterOperand(0x55));
279
280 __ add(R0, R1, ShifterOperand(5));
281 __ sub(R0, R1, ShifterOperand(5));
282
283 __ movs(R0, ShifterOperand(0x55));
284 __ mvns(R0, ShifterOperand(0x55));
285
286 size_t cs = __ CodeSize();
287 std::vector<uint8_t> managed_code(cs);
288 MemoryRegion code(&managed_code[0], managed_code.size());
289 __ FinalizeInstructions(code);
290 dump(managed_code, "DataProcessingImmediate");
291 delete assembler;
292}
293
294TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
295 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
296
297 __ mov(R0, ShifterOperand(0x550055));
298 __ mvn(R0, ShifterOperand(0x550055));
299 __ add(R0, R1, ShifterOperand(0x550055));
300 __ sub(R0, R1, ShifterOperand(0x550055));
301 __ and_(R0, R1, ShifterOperand(0x550055));
302 __ orr(R0, R1, ShifterOperand(0x550055));
303 __ eor(R0, R1, ShifterOperand(0x550055));
304 __ bic(R0, R1, ShifterOperand(0x550055));
305 __ adc(R0, R1, ShifterOperand(0x550055));
306 __ sbc(R0, R1, ShifterOperand(0x550055));
307 __ rsb(R0, R1, ShifterOperand(0x550055));
308
309 __ tst(R0, ShifterOperand(0x550055));
310 __ teq(R0, ShifterOperand(0x550055));
311 __ cmp(R0, ShifterOperand(0x550055));
312 __ cmn(R0, ShifterOperand(0x550055));
313
314 size_t cs = __ CodeSize();
315 std::vector<uint8_t> managed_code(cs);
316 MemoryRegion code(&managed_code[0], managed_code.size());
317 __ FinalizeInstructions(code);
318 dump(managed_code, "DataProcessingModifiedImmediate");
319 delete assembler;
320}
321
322
323TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
324 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
325
326 __ mov(R0, ShifterOperand(0x550055));
327 __ mov(R0, ShifterOperand(0x55005500));
328 __ mov(R0, ShifterOperand(0x55555555));
329 __ mov(R0, ShifterOperand(0xd5000000)); // rotated to first position
330 __ mov(R0, ShifterOperand(0x6a000000)); // rotated to second position
331 __ mov(R0, ShifterOperand(0x350)); // rotated to 2nd last position
332 __ mov(R0, ShifterOperand(0x1a8)); // rotated to last position
333
334 size_t cs = __ CodeSize();
335 std::vector<uint8_t> managed_code(cs);
336 MemoryRegion code(&managed_code[0], managed_code.size());
337 __ FinalizeInstructions(code);
338 dump(managed_code, "DataProcessingModifiedImmediates");
339 delete assembler;
340}
341
342TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
343 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
344
345 __ mov(R3, ShifterOperand(R4, LSL, 4));
346 __ mov(R3, ShifterOperand(R4, LSR, 5));
347 __ mov(R3, ShifterOperand(R4, ASR, 6));
348 __ mov(R3, ShifterOperand(R4, ROR, 7));
349 __ mov(R3, ShifterOperand(R4, ROR));
350
351 // 32 bit variants.
352 __ mov(R8, ShifterOperand(R4, LSL, 4));
353 __ mov(R8, ShifterOperand(R4, LSR, 5));
354 __ mov(R8, ShifterOperand(R4, ASR, 6));
355 __ mov(R8, ShifterOperand(R4, ROR, 7));
356 __ mov(R8, ShifterOperand(R4, RRX));
357
358 size_t cs = __ CodeSize();
359 std::vector<uint8_t> managed_code(cs);
360 MemoryRegion code(&managed_code[0], managed_code.size());
361 __ FinalizeInstructions(code);
362 dump(managed_code, "DataProcessingShiftedRegister");
363 delete assembler;
364}
365
366
367TEST(Thumb2AssemblerTest, BasicLoad) {
368 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
369
370 __ ldr(R3, Address(R4, 24));
371 __ ldrb(R3, Address(R4, 24));
372 __ ldrh(R3, Address(R4, 24));
373 __ ldrsb(R3, Address(R4, 24));
374 __ ldrsh(R3, Address(R4, 24));
375
376 __ ldr(R3, Address(SP, 24));
377
378 // 32 bit variants
379 __ ldr(R8, Address(R4, 24));
380 __ ldrb(R8, Address(R4, 24));
381 __ ldrh(R8, Address(R4, 24));
382 __ ldrsb(R8, Address(R4, 24));
383 __ ldrsh(R8, Address(R4, 24));
384
385 size_t cs = __ CodeSize();
386 std::vector<uint8_t> managed_code(cs);
387 MemoryRegion code(&managed_code[0], managed_code.size());
388 __ FinalizeInstructions(code);
389 dump(managed_code, "BasicLoad");
390 delete assembler;
391}
392
393
394TEST(Thumb2AssemblerTest, BasicStore) {
395 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
396
397 __ str(R3, Address(R4, 24));
398 __ strb(R3, Address(R4, 24));
399 __ strh(R3, Address(R4, 24));
400
401 __ str(R3, Address(SP, 24));
402
403 // 32 bit variants.
404 __ str(R8, Address(R4, 24));
405 __ strb(R8, Address(R4, 24));
406 __ strh(R8, Address(R4, 24));
407
408 size_t cs = __ CodeSize();
409 std::vector<uint8_t> managed_code(cs);
410 MemoryRegion code(&managed_code[0], managed_code.size());
411 __ FinalizeInstructions(code);
412 dump(managed_code, "BasicStore");
413 delete assembler;
414}
415
416TEST(Thumb2AssemblerTest, ComplexLoad) {
417 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
418
419 __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
420 __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
421 __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
422 __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
423 __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
424 __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));
425
426 __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
427 __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
428 __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
429 __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
430 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
431 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
432
433 __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
434 __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
435 __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
436 __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
437 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
438 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
439
440 __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
441 __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
442 __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
443 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
444 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
445 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
446
447 __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
448 __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
449 __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
450 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
451 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
452 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
453
454 size_t cs = __ CodeSize();
455 std::vector<uint8_t> managed_code(cs);
456 MemoryRegion code(&managed_code[0], managed_code.size());
457 __ FinalizeInstructions(code);
458 dump(managed_code, "ComplexLoad");
459 delete assembler;
460}
461
462
463TEST(Thumb2AssemblerTest, ComplexStore) {
464 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
465
466 __ str(R3, Address(R4, 24, Address::Mode::Offset));
467 __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
468 __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
469 __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
470 __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
471 __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));
472
473 __ strb(R3, Address(R4, 24, Address::Mode::Offset));
474 __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
475 __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
476 __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
477 __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
478 __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
479
480 __ strh(R3, Address(R4, 24, Address::Mode::Offset));
481 __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
482 __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
483 __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
484 __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
485 __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
486
487 size_t cs = __ CodeSize();
488 std::vector<uint8_t> managed_code(cs);
489 MemoryRegion code(&managed_code[0], managed_code.size());
490 __ FinalizeInstructions(code);
491 dump(managed_code, "ComplexStore");
492 delete assembler;
493}
494
495TEST(Thumb2AssemblerTest, NegativeLoadStore) {
496 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
497
498 __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
499 __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
500 __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
501 __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
502 __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
503 __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
504
505 __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
506 __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
507 __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
508 __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
509 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
510 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
511
512 __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
513 __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
514 __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
515 __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
516 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
517 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
518
519 __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
520 __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
521 __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
522 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
523 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
524 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
525
526 __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
527 __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
528 __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
529 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
530 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
531 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
532
533 __ str(R3, Address(R4, -24, Address::Mode::Offset));
534 __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
535 __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
536 __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
537 __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
538 __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
539
540 __ strb(R3, Address(R4, -24, Address::Mode::Offset));
541 __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
542 __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
543 __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
544 __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
545 __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
546
547 __ strh(R3, Address(R4, -24, Address::Mode::Offset));
548 __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
549 __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
550 __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
551 __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
552 __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
553
554 size_t cs = __ CodeSize();
555 std::vector<uint8_t> managed_code(cs);
556 MemoryRegion code(&managed_code[0], managed_code.size());
557 __ FinalizeInstructions(code);
558 dump(managed_code, "NegativeLoadStore");
559 delete assembler;
560}
561
562TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) {
563 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
564
565 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
566 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
567
568 size_t cs = __ CodeSize();
569 std::vector<uint8_t> managed_code(cs);
570 MemoryRegion code(&managed_code[0], managed_code.size());
571 __ FinalizeInstructions(code);
572 dump(managed_code, "SimpleLoadStoreDual");
573 delete assembler;
574}
575
576TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) {
577 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
578
579 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
580 __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
581 __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
582 __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
583 __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
584 __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
585
586 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
587 __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
588 __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
589 __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
590 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
591 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
592
593 size_t cs = __ CodeSize();
594 std::vector<uint8_t> managed_code(cs);
595 MemoryRegion code(&managed_code[0], managed_code.size());
596 __ FinalizeInstructions(code);
597 dump(managed_code, "ComplexLoadStoreDual");
598 delete assembler;
599}
600
601TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) {
602 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
603
604 __ strd(R2, Address(R0, -24, Address::Mode::Offset));
605 __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
606 __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
607 __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
608 __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
609 __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
610
611 __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
612 __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
613 __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
614 __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
615 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
616 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
617
618 size_t cs = __ CodeSize();
619 std::vector<uint8_t> managed_code(cs);
620 MemoryRegion code(&managed_code[0], managed_code.size());
621 __ FinalizeInstructions(code);
622 dump(managed_code, "NegativeLoadStoreDual");
623 delete assembler;
624}
625
626TEST(Thumb2AssemblerTest, SimpleBranch) {
627 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
628
629 Label l1;
630 __ mov(R0, ShifterOperand(2));
631 __ Bind(&l1);
632 __ mov(R1, ShifterOperand(1));
633 __ b(&l1);
634 Label l2;
635 __ b(&l2);
636 __ mov(R1, ShifterOperand(2));
637 __ Bind(&l2);
638 __ mov(R0, ShifterOperand(3));
639
640 Label l3;
641 __ mov(R0, ShifterOperand(2));
642 __ Bind(&l3);
643 __ mov(R1, ShifterOperand(1));
644 __ b(&l3, EQ);
645
646 Label l4;
647 __ b(&l4, EQ);
648 __ mov(R1, ShifterOperand(2));
649 __ Bind(&l4);
650 __ mov(R0, ShifterOperand(3));
651
652 // 2 linked labels.
653 Label l5;
654 __ b(&l5);
655 __ mov(R1, ShifterOperand(4));
656 __ b(&l5);
657 __ mov(R1, ShifterOperand(5));
658 __ Bind(&l5);
659 __ mov(R0, ShifterOperand(6));
660
661 size_t cs = __ CodeSize();
662 std::vector<uint8_t> managed_code(cs);
663 MemoryRegion code(&managed_code[0], managed_code.size());
664 __ FinalizeInstructions(code);
665 dump(managed_code, "SimpleBranch");
666 delete assembler;
667}
668
669TEST(Thumb2AssemblerTest, LongBranch) {
670 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
671 assembler->Force32Bit();
672 // 32 bit branches.
673 Label l1;
674 __ mov(R0, ShifterOperand(2));
675 __ Bind(&l1);
676 __ mov(R1, ShifterOperand(1));
677 __ b(&l1);
678
679 Label l2;
680 __ b(&l2);
681 __ mov(R1, ShifterOperand(2));
682 __ Bind(&l2);
683 __ mov(R0, ShifterOperand(3));
684
685 Label l3;
686 __ mov(R0, ShifterOperand(2));
687 __ Bind(&l3);
688 __ mov(R1, ShifterOperand(1));
689 __ b(&l3, EQ);
690
691 Label l4;
692 __ b(&l4, EQ);
693 __ mov(R1, ShifterOperand(2));
694 __ Bind(&l4);
695 __ mov(R0, ShifterOperand(3));
696
697 // 2 linked labels.
698 Label l5;
699 __ b(&l5);
700 __ mov(R1, ShifterOperand(4));
701 __ b(&l5);
702 __ mov(R1, ShifterOperand(5));
703 __ Bind(&l5);
704 __ mov(R0, ShifterOperand(6));
705
706 size_t cs = __ CodeSize();
707 std::vector<uint8_t> managed_code(cs);
708 MemoryRegion code(&managed_code[0], managed_code.size());
709 __ FinalizeInstructions(code);
710 dump(managed_code, "LongBranch");
711 delete assembler;
712}
713
714TEST(Thumb2AssemblerTest, LoadMultiple) {
715 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
716
717 // 16 bit.
718 __ ldm(DB_W, R4, (1 << R0 | 1 << R3));
719
720 // 32 bit.
721 __ ldm(DB_W, R4, (1 << LR | 1 << R11));
722 __ ldm(DB, R4, (1 << LR | 1 << R11));
723
724 // Single reg is converted to ldr
725 __ ldm(DB_W, R4, (1 << R5));
726
727 size_t cs = __ CodeSize();
728 std::vector<uint8_t> managed_code(cs);
729 MemoryRegion code(&managed_code[0], managed_code.size());
730 __ FinalizeInstructions(code);
731 dump(managed_code, "LoadMultiple");
732 delete assembler;
733}
734
735TEST(Thumb2AssemblerTest, StoreMultiple) {
736 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
737
738 // 16 bit.
739 __ stm(IA_W, R4, (1 << R0 | 1 << R3));
740
741 // 32 bit.
742 __ stm(IA_W, R4, (1 << LR | 1 << R11));
743 __ stm(IA, R4, (1 << LR | 1 << R11));
744
745 // Single reg is converted to str
746 __ stm(IA_W, R4, (1 << R5));
747 __ stm(IA, R4, (1 << R5));
748
749 size_t cs = __ CodeSize();
750 std::vector<uint8_t> managed_code(cs);
751 MemoryRegion code(&managed_code[0], managed_code.size());
752 __ FinalizeInstructions(code);
753 dump(managed_code, "StoreMultiple");
754 delete assembler;
755}
756
757TEST(Thumb2AssemblerTest, MovWMovT) {
758 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
759
760 __ movw(R4, 0); // 16 bit.
761 __ movw(R4, 0x34); // 16 bit.
762 __ movw(R9, 0x34); // 32 bit due to high register.
763 __ movw(R3, 0x1234); // 32 bit due to large value.
764 __ movw(R9, 0xffff); // 32 bit due to large value and high register.
765
766 // Always 32 bit.
767 __ movt(R0, 0);
768 __ movt(R0, 0x1234);
769 __ movt(R1, 0xffff);
770
771 size_t cs = __ CodeSize();
772 std::vector<uint8_t> managed_code(cs);
773 MemoryRegion code(&managed_code[0], managed_code.size());
774 __ FinalizeInstructions(code);
775 dump(managed_code, "MovWMovT");
776 delete assembler;
777}
778
779TEST(Thumb2AssemblerTest, SpecialAddSub) {
780 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
781
782 __ add(R2, SP, ShifterOperand(0x50)); // 16 bit.
783 __ add(SP, SP, ShifterOperand(0x50)); // 16 bit.
784 __ add(R8, SP, ShifterOperand(0x50)); // 32 bit.
785
786 __ add(R2, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
787 __ add(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
788
789 __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit
790 __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit
791 __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit.
792
793 __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size
794
795 size_t cs = __ CodeSize();
796 std::vector<uint8_t> managed_code(cs);
797 MemoryRegion code(&managed_code[0], managed_code.size());
798 __ FinalizeInstructions(code);
799 dump(managed_code, "SpecialAddSub");
800 delete assembler;
801}
802
803TEST(Thumb2AssemblerTest, StoreToOffset) {
804 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
805
806 __ StoreToOffset(kStoreWord, R2, R4, 12); // Simple
807 __ StoreToOffset(kStoreWord, R2, R4, 0x2000); // Offset too big.
Nicolas Geoffray3c7bb982014-07-23 16:04:16 +0100808 __ StoreToOffset(kStoreWord, R0, R12, 12);
809 __ StoreToOffset(kStoreHalfword, R0, R12, 12);
810 __ StoreToOffset(kStoreByte, R2, R12, 12);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700811
812 size_t cs = __ CodeSize();
813 std::vector<uint8_t> managed_code(cs);
814 MemoryRegion code(&managed_code[0], managed_code.size());
815 __ FinalizeInstructions(code);
816 dump(managed_code, "StoreToOffset");
817 delete assembler;
818}
819
820
// Checks the IT (if-then) instruction with every then/else slot combination
// used by the assembler; each IT is followed by the matching number of
// conditional mov instructions whose conditions agree with the IT pattern.
TEST(Thumb2AssemblerTest, IfThen) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // IT: a single conditional instruction.
  __ it(EQ);
  __ mov(R1, ShifterOperand(1), EQ);

  // IT with one "then" slot: two instructions, both EQ.
  __ it(EQ, kItThen);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);

  // IT with one "else" slot: an EQ instruction then an NE one.
  __ it(EQ, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);

  // then + else slots: EQ, EQ, NE.
  __ it(EQ, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), NE);

  // else + else slots: EQ, NE, NE.
  __ it(EQ, kItElse, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);
  __ mov(R3, ShifterOperand(3), NE);

  // then + then + else slots: EQ, EQ, EQ, NE.
  __ it(EQ, kItThen, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), EQ);
  __ mov(R4, ShifterOperand(4), NE);

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "IfThen");
  delete assembler;
}
858
859TEST(Thumb2AssemblerTest, CbzCbnz) {
860 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
861
862 Label l1;
863 __ cbz(R2, &l1);
864 __ mov(R1, ShifterOperand(3));
865 __ mov(R2, ShifterOperand(3));
866 __ Bind(&l1);
867 __ mov(R2, ShifterOperand(4));
868
869 Label l2;
870 __ cbnz(R2, &l2);
871 __ mov(R8, ShifterOperand(3));
872 __ mov(R2, ShifterOperand(3));
873 __ Bind(&l2);
874 __ mov(R2, ShifterOperand(4));
875
876 size_t cs = __ CodeSize();
877 std::vector<uint8_t> managed_code(cs);
878 MemoryRegion code(&managed_code[0], managed_code.size());
879 __ FinalizeInstructions(code);
880 dump(managed_code, "CbzCbnz");
881 delete assembler;
882}
883
// Checks the multiply family: mul with the destination equal to a source
// operand and with fully distinct registers, mla/mls, and umull, each in
// both low (R0-R7) and high registers.
TEST(Thumb2AssemblerTest, Multiply) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Plain multiply, low then high registers.
  __ mul(R0, R1, R0);
  __ mul(R0, R1, R2);
  __ mul(R8, R9, R8);
  __ mul(R8, R9, R10);

  // Multiply-accumulate.
  __ mla(R0, R1, R2, R3);
  __ mla(R8, R9, R8, R9);

  // Multiply-subtract.
  __ mls(R0, R1, R2, R3);
  __ mls(R8, R9, R8, R9);

  // Unsigned long multiply (result in a register pair).
  __ umull(R0, R1, R2, R3);
  __ umull(R8, R9, R10, R11);

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Multiply");
  delete assembler;
}
908
909TEST(Thumb2AssemblerTest, Divide) {
910 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
911
912 __ sdiv(R0, R1, R2);
913 __ sdiv(R8, R9, R10);
914
915 __ udiv(R0, R1, R2);
916 __ udiv(R8, R9, R10);
917
918 size_t cs = __ CodeSize();
919 std::vector<uint8_t> managed_code(cs);
920 MemoryRegion code(&managed_code[0], managed_code.size());
921 __ FinalizeInstructions(code);
922 dump(managed_code, "Divide");
923 delete assembler;
924}
925
926TEST(Thumb2AssemblerTest, VMov) {
927 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
928
929 __ vmovs(S1, 1.0);
930 __ vmovd(D1, 1.0);
931
932 __ vmovs(S1, S2);
933 __ vmovd(D1, D2);
934
935 size_t cs = __ CodeSize();
936 std::vector<uint8_t> managed_code(cs);
937 MemoryRegion code(&managed_code[0], managed_code.size());
938 __ FinalizeInstructions(code);
939 dump(managed_code, "VMov");
940 delete assembler;
941}
942
943
// Checks the basic VFP arithmetic instructions (add, sub, mul,
// multiply-accumulate, multiply-subtract, div, abs, neg, sqrt) in both
// single- and double-precision forms.
TEST(Thumb2AssemblerTest, BasicFloatingPoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Single precision (S registers).
  __ vadds(S0, S1, S2);
  __ vsubs(S0, S1, S2);
  __ vmuls(S0, S1, S2);
  __ vmlas(S0, S1, S2);
  __ vmlss(S0, S1, S2);
  __ vdivs(S0, S1, S2);
  __ vabss(S0, S1);
  __ vnegs(S0, S1);
  __ vsqrts(S0, S1);

  // Double precision (D registers).
  __ vaddd(D0, D1, D2);
  __ vsubd(D0, D1, D2);
  __ vmuld(D0, D1, D2);
  __ vmlad(D0, D1, D2);
  __ vmlsd(D0, D1, D2);
  __ vdivd(D0, D1, D2);
  __ vabsd(D0, D1);
  __ vnegd(D0, D1);
  __ vsqrtd(D0, D1);

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicFloatingPoint");
  delete assembler;
}
974
// Checks the VFP conversion instructions: between single and double
// precision, and between floating point and signed/unsigned integers
// (the operand register types indicate the direction of each conversion).
TEST(Thumb2AssemblerTest, FloatingPointConversions) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Precision conversions: double <-> single.
  __ vcvtsd(S2, D2);
  __ vcvtds(D2, S2);

  // Single <-> signed integer.
  __ vcvtis(S1, S2);
  __ vcvtsi(S1, S2);

  // Double <-> signed integer.
  __ vcvtid(S1, D2);
  __ vcvtdi(D1, S2);

  // Single <-> unsigned integer.
  __ vcvtus(S1, S2);
  __ vcvtsu(S1, S2);

  // Double <-> unsigned integer.
  __ vcvtud(S1, D2);
  __ vcvtdu(D1, S2);

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "FloatingPointConversions");
  delete assembler;
}
1000
1001TEST(Thumb2AssemblerTest, FloatingPointComparisons) {
1002 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1003
1004 __ vcmps(S0, S1);
1005 __ vcmpd(D0, D1);
1006
1007 __ vcmpsz(S2);
1008 __ vcmpdz(D2);
1009
1010 size_t cs = __ CodeSize();
1011 std::vector<uint8_t> managed_code(cs);
1012 MemoryRegion code(&managed_code[0], managed_code.size());
1013 __ FinalizeInstructions(code);
1014 dump(managed_code, "FloatingPointComparisons");
1015 delete assembler;
1016}
1017
1018TEST(Thumb2AssemblerTest, Calls) {
1019 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1020
1021 __ blx(LR);
1022 __ bx(LR);
1023
1024 size_t cs = __ CodeSize();
1025 std::vector<uint8_t> managed_code(cs);
1026 MemoryRegion code(&managed_code[0], managed_code.size());
1027 __ FinalizeInstructions(code);
1028 dump(managed_code, "Calls");
1029 delete assembler;
1030}
1031
1032TEST(Thumb2AssemblerTest, Breakpoint) {
1033 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1034
1035 __ bkpt(0);
1036
1037 size_t cs = __ CodeSize();
1038 std::vector<uint8_t> managed_code(cs);
1039 MemoryRegion code(&managed_code[0], managed_code.size());
1040 __ FinalizeInstructions(code);
1041 dump(managed_code, "Breakpoint");
1042 delete assembler;
1043}
1044
1045TEST(Thumb2AssemblerTest, StrR1) {
1046 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1047
1048 __ str(R1, Address(SP, 68));
1049 __ str(R1, Address(SP, 1068));
1050
1051 size_t cs = __ CodeSize();
1052 std::vector<uint8_t> managed_code(cs);
1053 MemoryRegion code(&managed_code[0], managed_code.size());
1054 __ FinalizeInstructions(code);
1055 dump(managed_code, "StrR1");
1056 delete assembler;
1057}
1058
1059TEST(Thumb2AssemblerTest, VPushPop) {
1060 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1061
1062 __ vpushs(S2, 4);
1063 __ vpushd(D2, 4);
1064
1065 __ vpops(S2, 4);
1066 __ vpopd(D2, 4);
1067
1068 size_t cs = __ CodeSize();
1069 std::vector<uint8_t> managed_code(cs);
1070 MemoryRegion code(&managed_code[0], managed_code.size());
1071 __ FinalizeInstructions(code);
1072 dump(managed_code, "VPushPop");
1073 delete assembler;
1074}
1075
1076TEST(Thumb2AssemblerTest, Max16BitBranch) {
1077 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1078
1079 Label l1;
1080 __ b(&l1);
1081 for (int i = 0 ; i < (1 << 11) ; i += 2) {
1082 __ mov(R3, ShifterOperand(i & 0xff));
1083 }
1084 __ Bind(&l1);
1085 __ mov(R1, ShifterOperand(R2));
1086
1087 size_t cs = __ CodeSize();
1088 std::vector<uint8_t> managed_code(cs);
1089 MemoryRegion code(&managed_code[0], managed_code.size());
1090 __ FinalizeInstructions(code);
1091 dump(managed_code, "Max16BitBranch");
1092 delete assembler;
1093}
1094
1095TEST(Thumb2AssemblerTest, Branch32) {
1096 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1097
1098 Label l1;
1099 __ b(&l1);
1100 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1101 __ mov(R3, ShifterOperand(i & 0xff));
1102 }
1103 __ Bind(&l1);
1104 __ mov(R1, ShifterOperand(R2));
1105
1106 size_t cs = __ CodeSize();
1107 std::vector<uint8_t> managed_code(cs);
1108 MemoryRegion code(&managed_code[0], managed_code.size());
1109 __ FinalizeInstructions(code);
1110 dump(managed_code, "Branch32");
1111 delete assembler;
1112}
1113
1114TEST(Thumb2AssemblerTest, CompareAndBranchMax) {
1115 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1116
1117 Label l1;
1118 __ cbz(R4, &l1);
1119 for (int i = 0 ; i < (1 << 7) ; i += 2) {
1120 __ mov(R3, ShifterOperand(i & 0xff));
1121 }
1122 __ Bind(&l1);
1123 __ mov(R1, ShifterOperand(R2));
1124
1125 size_t cs = __ CodeSize();
1126 std::vector<uint8_t> managed_code(cs);
1127 MemoryRegion code(&managed_code[0], managed_code.size());
1128 __ FinalizeInstructions(code);
1129 dump(managed_code, "CompareAndBranchMax");
1130 delete assembler;
1131}
1132
1133TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
1134 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1135
1136 Label l1;
1137 __ cbz(R4, &l1);
1138 for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
1139 __ mov(R3, ShifterOperand(i & 0xff));
1140 }
1141 __ Bind(&l1);
1142 __ mov(R1, ShifterOperand(R2));
1143
1144 size_t cs = __ CodeSize();
1145 std::vector<uint8_t> managed_code(cs);
1146 MemoryRegion code(&managed_code[0], managed_code.size());
1147 __ FinalizeInstructions(code);
1148 dump(managed_code, "CompareAndBranchRelocation16");
1149 delete assembler;
1150}
1151
1152TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
1153 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1154
1155 Label l1;
1156 __ cbz(R4, &l1);
1157 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1158 __ mov(R3, ShifterOperand(i & 0xff));
1159 }
1160 __ Bind(&l1);
1161 __ mov(R1, ShifterOperand(R2));
1162
1163 size_t cs = __ CodeSize();
1164 std::vector<uint8_t> managed_code(cs);
1165 MemoryRegion code(&managed_code[0], managed_code.size());
1166 __ FinalizeInstructions(code);
1167 dump(managed_code, "CompareAndBranchRelocation32");
1168 delete assembler;
1169}
1170
1171TEST(Thumb2AssemblerTest, MixedBranch32) {
1172 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1173
1174 Label l1;
1175 Label l2;
1176 __ b(&l1); // Forwards.
1177 __ Bind(&l2);
1178
1179 // Space to force relocation.
1180 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1181 __ mov(R3, ShifterOperand(i & 0xff));
1182 }
1183 __ b(&l2); // Backwards.
1184 __ Bind(&l1);
1185 __ mov(R1, ShifterOperand(R2));
1186
1187 size_t cs = __ CodeSize();
1188 std::vector<uint8_t> managed_code(cs);
1189 MemoryRegion code(&managed_code[0], managed_code.size());
1190 __ FinalizeInstructions(code);
1191 dump(managed_code, "MixedBranch32");
1192 delete assembler;
1193}
1194
// Checks the shift macro-instructions (Lsl, Lsr, Asr, Ror) by immediate and
// by register, covering the cases noted inline where a 32-bit encoding is
// required, plus the flag-setting (S bit) variants.
TEST(Thumb2AssemblerTest, Shifts) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit
  __ Lsl(R0, R1, 5);
  __ Lsr(R0, R1, 5);
  __ Asr(R0, R1, 5);

  __ Lsl(R0, R0, R1);
  __ Lsr(R0, R0, R1);
  __ Asr(R0, R0, R1);

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5);
  __ Lsr(R0, R8, 5);
  __ Asr(R8, R1, 5);
  __ Ror(R0, R8, 5);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2);
  __ Lsr(R0, R1, R2);
  __ Asr(R0, R1, R2);
  __ Ror(R0, R1, R2);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2);
  __ Lsr(R0, R8, R2);
  __ Asr(R0, R1, R8);

  // S bit (all 32 bit)

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5, true);
  __ Lsr(R0, R8, 5, true);
  __ Asr(R8, R1, 5, true);
  __ Ror(R0, R8, 5, true);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2, true);
  __ Lsr(R0, R1, R2, true);
  __ Asr(R0, R1, R2, true);
  __ Ror(R0, R1, R2, true);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2, true);
  __ Lsr(R0, R8, R2, true);
  __ Asr(R0, R1, R8, true);

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Shifts");
  delete assembler;
}
1250
// Checks ldr/str with a register offset, including shifted offsets and the
// high-register uses that, per the inline notes, need 32-bit encodings.
TEST(Thumb2AssemblerTest, LoadStoreRegOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ ldr(R0, Address(R1, R2));
  __ str(R0, Address(R1, R2));

  // 32 bit due to shift.
  __ ldr(R0, Address(R1, R2, LSL, 1));
  __ str(R0, Address(R1, R2, LSL, 1));

  __ ldr(R0, Address(R1, R2, LSL, 3));
  __ str(R0, Address(R1, R2, LSL, 3));

  // 32 bit due to high register use.
  __ ldr(R8, Address(R1, R2));
  __ str(R8, Address(R1, R2));

  __ ldr(R1, Address(R8, R2));
  __ str(R2, Address(R8, R2));

  __ ldr(R0, Address(R1, R8));
  __ str(R0, Address(R1, R8));

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreRegOffset");
  delete assembler;
}
1282
// Checks ldr/str with the literal (offset-only) Address form, including
// negative offsets and the encoding-size limits noted inline.
TEST(Thumb2AssemblerTest, LoadStoreLiteral) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // Small positive and negative offsets.
  __ ldr(R0, Address(4));
  __ str(R0, Address(4));

  __ ldr(R0, Address(-8));
  __ str(R0, Address(-8));

  // Limits.
  __ ldr(R0, Address(0x3ff));       // 10 bits (16 bit).
  __ ldr(R0, Address(0x7ff));       // 11 bits (32 bit).
  __ str(R0, Address(0x3ff));       // 32 bit (no 16 bit str(literal)).
  __ str(R0, Address(0x7ff));       // 11 bits (32 bit).

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreLiteral");
  delete assembler;
}
1305
// Checks the immediate-offset boundary at which each load/store access size
// switches from a 16-bit to a 32-bit encoding (per the inline notes); each
// pair uses the largest 16-bit-encodable offset and the first one past it.
TEST(Thumb2AssemblerTest, LoadStoreLimits) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R0, Address(R4, 124));     // 16 bit.
  __ ldr(R0, Address(R4, 128));     // 32 bit.

  __ ldrb(R0, Address(R4, 31));     // 16 bit.
  __ ldrb(R0, Address(R4, 32));     // 32 bit.

  __ ldrh(R0, Address(R4, 62));     // 16 bit.
  __ ldrh(R0, Address(R4, 64));     // 32 bit.

  __ ldrsb(R0, Address(R4, 31));     // 32 bit.
  __ ldrsb(R0, Address(R4, 32));     // 32 bit.

  __ ldrsh(R0, Address(R4, 62));     // 32 bit.
  __ ldrsh(R0, Address(R4, 64));     // 32 bit.

  __ str(R0, Address(R4, 124));     // 16 bit.
  __ str(R0, Address(R4, 128));     // 32 bit.

  __ strb(R0, Address(R4, 31));     // 16 bit.
  __ strb(R0, Address(R4, 32));     // 32 bit.

  __ strh(R0, Address(R4, 62));     // 16 bit.
  __ strh(R0, Address(R4, 64));     // 32 bit.

  // Finalize the generated code into a buffer and check/print the result.
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreLimits");
  delete assembler;
}
1340
Dave Allison65fcc2c2014-04-28 13:45:27 -07001341#undef __
1342} // namespace arm
1343} // namespace art