blob: 3b05173d881cc79bfff0665c49d82790c0348e56 [file] [log] [blame]
Dave Allison65fcc2c2014-04-28 13:45:27 -07001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Nicolas Geoffray96f89a22014-07-11 10:57:49 +010017#include <dirent.h>
Andreas Gampefd114702015-05-13 17:00:41 -070018#include <errno.h>
Dave Allison65fcc2c2014-04-28 13:45:27 -070019#include <fstream>
Nicolas Geoffray96f89a22014-07-11 10:57:49 +010020#include <map>
Andreas Gampefd114702015-05-13 17:00:41 -070021#include <string.h>
22#include <sys/types.h>
Dave Allison65fcc2c2014-04-28 13:45:27 -070023
24#include "gtest/gtest.h"
25#include "utils/arm/assembler_thumb2.h"
Artem Serov12e097c2016-08-08 15:13:26 +010026
27#include "jni/quick/calling_convention.h"
28#include "utils/arm/jni_macro_assembler_arm_vixl.h"
29
Dave Allison65fcc2c2014-04-28 13:45:27 -070030#include "base/hex_dump.h"
31#include "common_runtime_test.h"
32
33namespace art {
34namespace arm {
35
36// Include results file (generated manually)
37#include "assembler_thumb_test_expected.cc.inc"
38
Bilyan Borisovbb661c02016-04-04 16:27:32 +010039#ifndef ART_TARGET_ANDROID
Dave Allison45fdb932014-06-25 12:37:10 -070040// This controls whether the results are printed to the
41// screen or compared against the expected output.
42// To generate new expected output, set this to true and
43// copy the output into the .cc.inc file in the form
44// of the other results.
45//
46// When this is false, the results are not printed to the
47// output, but are compared against the expected results
48// in the .cc.inc file.
Dave Allison65fcc2c2014-04-28 13:45:27 -070049static constexpr bool kPrintResults = false;
Dave Allisond20ddb22014-06-05 14:16:30 -070050#endif
Dave Allison65fcc2c2014-04-28 13:45:27 -070051
52void SetAndroidData() {
53 const char* data = getenv("ANDROID_DATA");
54 if (data == nullptr) {
55 setenv("ANDROID_DATA", "/tmp", 1);
56 }
57}
58
Dave Allison45fdb932014-06-25 12:37:10 -070059int CompareIgnoringSpace(const char* s1, const char* s2) {
60 while (*s1 != '\0') {
61 while (isspace(*s1)) ++s1;
62 while (isspace(*s2)) ++s2;
63 if (*s1 == '\0' || *s1 != *s2) {
64 break;
65 }
66 ++s1;
67 ++s2;
68 }
69 return *s1 - *s2;
70}
71
Vladimir Markocf93a5c2015-06-16 11:33:24 +000072void InitResults() {
73 if (test_results.empty()) {
74 setup_results();
75 }
76}
77
78std::string GetToolsDir() {
Bilyan Borisovbb661c02016-04-04 16:27:32 +010079#ifndef ART_TARGET_ANDROID
Vladimir Markocf93a5c2015-06-16 11:33:24 +000080 // This will only work on the host. There is no as, objcopy or objdump on the device.
Dave Allison65fcc2c2014-04-28 13:45:27 -070081 static std::string toolsdir;
82
Vladimir Markocf93a5c2015-06-16 11:33:24 +000083 if (toolsdir.empty()) {
Dave Allison65fcc2c2014-04-28 13:45:27 -070084 setup_results();
David Srbecky3e52aa42015-04-12 07:45:18 +010085 toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
Dave Allison65fcc2c2014-04-28 13:45:27 -070086 SetAndroidData();
Dave Allison65fcc2c2014-04-28 13:45:27 -070087 }
88
Vladimir Markocf93a5c2015-06-16 11:33:24 +000089 return toolsdir;
90#else
91 return std::string();
92#endif
93}
94
95void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) {
Bilyan Borisovbb661c02016-04-04 16:27:32 +010096#ifndef ART_TARGET_ANDROID
Vladimir Markocf93a5c2015-06-16 11:33:24 +000097 static std::string toolsdir = GetToolsDir();
98
Dave Allison65fcc2c2014-04-28 13:45:27 -070099 ScratchFile file;
100
101 const char* filename = file.GetFilename().c_str();
102
103 std::ofstream out(filename);
104 if (out) {
105 out << ".section \".text\"\n";
106 out << ".syntax unified\n";
107 out << ".arch armv7-a\n";
108 out << ".thumb\n";
109 out << ".thumb_func\n";
110 out << ".type " << testname << ", #function\n";
111 out << ".global " << testname << "\n";
112 out << testname << ":\n";
113 out << ".fnstart\n";
114
115 for (uint32_t i = 0 ; i < code.size(); ++i) {
116 out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
117 }
118 out << ".fnend\n";
119 out << ".size " << testname << ", .-" << testname << "\n";
120 }
121 out.close();
122
Andreas Gampe4470c1d2014-07-21 18:32:59 -0700123 char cmd[1024];
Dave Allison65fcc2c2014-04-28 13:45:27 -0700124
125 // Assemble the .S
David Srbecky3e52aa42015-04-12 07:45:18 +0100126 snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
Andreas Gampefd114702015-05-13 17:00:41 -0700127 int cmd_result = system(cmd);
128 ASSERT_EQ(cmd_result, 0) << strerror(errno);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700129
130 // Remove the $d symbols to prevent the disassembler dumping the instructions
131 // as .word
David Srbecky3e52aa42015-04-12 07:45:18 +0100132 snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
Andreas Gampefd114702015-05-13 17:00:41 -0700133 int cmd_result2 = system(cmd);
134 ASSERT_EQ(cmd_result2, 0) << strerror(errno);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700135
136 // Disassemble.
137
David Srbecky3e52aa42015-04-12 07:45:18 +0100138 snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
139 toolsdir.c_str(), filename);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700140 if (kPrintResults) {
141 // Print the results only, don't check. This is used to generate new output for inserting
Vladimir Markof5c09c32015-12-17 12:08:08 +0000142 // into the .inc file, so let's add the appropriate prefix/suffix needed in the C++ code.
143 strcat(cmd, " | sed '-es/^/ \"/' | sed '-es/$/\\\\n\",/'");
Andreas Gampefd114702015-05-13 17:00:41 -0700144 int cmd_result3 = system(cmd);
145 ASSERT_EQ(cmd_result3, 0) << strerror(errno);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700146 } else {
147 // Check the results match the appropriate results in the .inc file.
148 FILE *fp = popen(cmd, "r");
149 ASSERT_TRUE(fp != nullptr);
150
Dave Allison65fcc2c2014-04-28 13:45:27 -0700151 uint32_t lineindex = 0;
152
153 while (!feof(fp)) {
154 char testline[256];
155 char *s = fgets(testline, sizeof(testline), fp);
156 if (s == nullptr) {
157 break;
158 }
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000159 if (CompareIgnoringSpace(results[lineindex], testline) != 0) {
Dave Allison45fdb932014-06-25 12:37:10 -0700160 LOG(FATAL) << "Output is not as expected at line: " << lineindex
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000161 << results[lineindex] << "/" << testline;
Dave Allison45fdb932014-06-25 12:37:10 -0700162 }
Dave Allison65fcc2c2014-04-28 13:45:27 -0700163 ++lineindex;
164 }
165 // Check that we are at the end.
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000166 ASSERT_TRUE(results[lineindex] == nullptr);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700167 fclose(fp);
168 }
169
170 char buf[FILENAME_MAX];
171 snprintf(buf, sizeof(buf), "%s.o", filename);
172 unlink(buf);
173
174 snprintf(buf, sizeof(buf), "%s.oo", filename);
175 unlink(buf);
Bilyan Borisovbb661c02016-04-04 16:27:32 +0100176#endif // ART_TARGET_ANDROID
Dave Allison65fcc2c2014-04-28 13:45:27 -0700177}
178
179#define __ assembler->
180
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000181void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname,
182 const char* const* results) {
183 __ FinalizeCode();
184 size_t cs = __ CodeSize();
185 std::vector<uint8_t> managed_code(cs);
186 MemoryRegion code(&managed_code[0], managed_code.size());
187 __ FinalizeInstructions(code);
188
189 DumpAndCheck(managed_code, testname, results);
190}
191
192void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
193 InitResults();
194 std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
195 ASSERT_NE(results, test_results.end());
196
197 EmitAndCheck(assembler, testname, results->second);
198}
199
200#undef __
201
Vladimir Marko93205e32016-04-13 11:59:46 +0100202class Thumb2AssemblerTest : public ::testing::Test {
203 public:
204 Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
205
206 ArenaPool pool;
207 ArenaAllocator arena;
208 arm::Thumb2Assembler assembler;
209};
210
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000211#define __ assembler.
212
Vladimir Marko93205e32016-04-13 11:59:46 +0100213TEST_F(Thumb2AssemblerTest, SimpleMov) {
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100214 __ movs(R0, ShifterOperand(R1));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700215 __ mov(R0, ShifterOperand(R1));
216 __ mov(R8, ShifterOperand(R9));
217
218 __ mov(R0, ShifterOperand(1));
219 __ mov(R8, ShifterOperand(9));
220
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000221 EmitAndCheck(&assembler, "SimpleMov");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700222}
223
Vladimir Marko93205e32016-04-13 11:59:46 +0100224TEST_F(Thumb2AssemblerTest, SimpleMov32) {
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000225 __ Force32Bit();
Dave Allison65fcc2c2014-04-28 13:45:27 -0700226
227 __ mov(R0, ShifterOperand(R1));
228 __ mov(R8, ShifterOperand(R9));
229
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000230 EmitAndCheck(&assembler, "SimpleMov32");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700231}
232
Vladimir Marko93205e32016-04-13 11:59:46 +0100233TEST_F(Thumb2AssemblerTest, SimpleMovAdd) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700234 __ mov(R0, ShifterOperand(R1));
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100235 __ adds(R0, R1, ShifterOperand(R2));
236 __ add(R0, R1, ShifterOperand(0));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700237
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000238 EmitAndCheck(&assembler, "SimpleMovAdd");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700239}
240
Vladimir Marko93205e32016-04-13 11:59:46 +0100241TEST_F(Thumb2AssemblerTest, DataProcessingRegister) {
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100242 // 32 bit variants using low registers.
243 __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
244 __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
245 __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
246 __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
247 __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
Vladimir Markod2b4ca22015-09-14 15:13:26 +0100248 __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100249 __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
250 __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
251 __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
252 __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
253 __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
254 __ teq(R0, ShifterOperand(R1));
255
256 // 16 bit variants using low registers.
257 __ movs(R0, ShifterOperand(R1));
258 __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
259 __ mvns(R0, ShifterOperand(R1));
260 __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
261 __ adds(R0, R1, ShifterOperand(R2));
262 __ subs(R0, R1, ShifterOperand(R2));
263 __ adcs(R0, R0, ShifterOperand(R1));
264 __ sbcs(R0, R0, ShifterOperand(R1));
265 __ ands(R0, R0, ShifterOperand(R1));
266 __ orrs(R0, R0, ShifterOperand(R1));
267 __ eors(R0, R0, ShifterOperand(R1));
268 __ bics(R0, R0, ShifterOperand(R1));
269 __ tst(R0, ShifterOperand(R1));
270 __ cmp(R0, ShifterOperand(R1));
271 __ cmn(R0, ShifterOperand(R1));
272
273 // 16-bit variants using high registers.
274 __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
275 __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
276 __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
277 __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
278 __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
279 __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
280 __ cmp(R0, ShifterOperand(R9));
281 __ cmp(R8, ShifterOperand(R1));
282 __ cmp(R9, ShifterOperand(R8));
283
284 // The 16-bit RSBS Rd, Rn, #0, also known as NEGS Rd, Rn is specified using
285 // an immediate (0) but emitted without any, so we test it here.
286 __ rsbs(R0, R1, ShifterOperand(0));
287 __ rsbs(R0, R0, ShifterOperand(0)); // Check Rd == Rn code path.
288
289 // 32 bit variants using high registers that would be 16-bit if using low registers.
290 __ movs(R0, ShifterOperand(R8));
291 __ mvns(R0, ShifterOperand(R8));
292 __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
293 __ adds(R0, R1, ShifterOperand(R8));
294 __ subs(R0, R1, ShifterOperand(R8));
295 __ adcs(R0, R0, ShifterOperand(R8));
296 __ sbcs(R0, R0, ShifterOperand(R8));
297 __ ands(R0, R0, ShifterOperand(R8));
298 __ orrs(R0, R0, ShifterOperand(R8));
299 __ eors(R0, R0, ShifterOperand(R8));
300 __ bics(R0, R0, ShifterOperand(R8));
301 __ tst(R0, ShifterOperand(R8));
302 __ cmn(R0, ShifterOperand(R8));
303 __ rsbs(R0, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit.
304 __ rsbs(R8, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit (Rd == Rn).
305
306 // 32-bit variants of instructions that would be 16-bit outside IT block.
307 __ it(arm::EQ);
308 __ mvns(R0, ShifterOperand(R1), arm::EQ);
309 __ it(arm::EQ);
310 __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
311 __ it(arm::EQ);
312 __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
313 __ it(arm::EQ);
314 __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
315 __ it(arm::EQ);
316 __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
317 __ it(arm::EQ);
318 __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
319 __ it(arm::EQ);
320 __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
321 __ it(arm::EQ);
322 __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
323 __ it(arm::EQ);
324 __ bics(R0, R0, ShifterOperand(R1), arm::EQ);
325
326 // 16-bit variants of instructions that would be 32-bit outside IT block.
327 __ it(arm::EQ);
328 __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
329 __ it(arm::EQ);
330 __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
331 __ it(arm::EQ);
332 __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
333 __ it(arm::EQ);
334 __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
335 __ it(arm::EQ);
336 __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
337 __ it(arm::EQ);
338 __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
339 __ it(arm::EQ);
340 __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
341 __ it(arm::EQ);
342 __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
343 __ it(arm::EQ);
344 __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
345
346 // 16 bit variants selected for the default kCcDontCare.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700347 __ mov(R0, ShifterOperand(R1));
348 __ mvn(R0, ShifterOperand(R1));
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100349 __ add(R0, R0, ShifterOperand(R1));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700350 __ add(R0, R1, ShifterOperand(R2));
351 __ sub(R0, R1, ShifterOperand(R2));
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100352 __ adc(R0, R0, ShifterOperand(R1));
353 __ sbc(R0, R0, ShifterOperand(R1));
Andreas Gampe7b7e5242015-02-02 19:17:11 -0800354 __ and_(R0, R0, ShifterOperand(R1));
355 __ orr(R0, R0, ShifterOperand(R1));
356 __ eor(R0, R0, ShifterOperand(R1));
357 __ bic(R0, R0, ShifterOperand(R1));
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100358 __ mov(R1, ShifterOperand(R8));
359 __ mov(R9, ShifterOperand(R0));
360 __ mov(R8, ShifterOperand(R9));
361 __ add(R1, R1, ShifterOperand(R8));
362 __ add(R9, R9, ShifterOperand(R0));
363 __ add(R8, R8, ShifterOperand(R9));
364 __ rsb(R0, R1, ShifterOperand(0));
365 __ rsb(R0, R0, ShifterOperand(0));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700366
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100367 // And an arbitrary 32-bit instruction using IP.
368 __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);
Nicolas Geoffray3c7bb982014-07-23 16:04:16 +0100369
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000370 EmitAndCheck(&assembler, "DataProcessingRegister");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700371}
372
Vladimir Marko93205e32016-04-13 11:59:46 +0100373TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700374 __ mov(R0, ShifterOperand(0x55));
375 __ mvn(R0, ShifterOperand(0x55));
376 __ add(R0, R1, ShifterOperand(0x55));
377 __ sub(R0, R1, ShifterOperand(0x55));
378 __ and_(R0, R1, ShifterOperand(0x55));
379 __ orr(R0, R1, ShifterOperand(0x55));
Vladimir Markod2b4ca22015-09-14 15:13:26 +0100380 __ orn(R0, R1, ShifterOperand(0x55));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700381 __ eor(R0, R1, ShifterOperand(0x55));
382 __ bic(R0, R1, ShifterOperand(0x55));
383 __ adc(R0, R1, ShifterOperand(0x55));
384 __ sbc(R0, R1, ShifterOperand(0x55));
385 __ rsb(R0, R1, ShifterOperand(0x55));
386
387 __ tst(R0, ShifterOperand(0x55));
388 __ teq(R0, ShifterOperand(0x55));
389 __ cmp(R0, ShifterOperand(0x55));
390 __ cmn(R0, ShifterOperand(0x55));
391
392 __ add(R0, R1, ShifterOperand(5));
393 __ sub(R0, R1, ShifterOperand(5));
394
395 __ movs(R0, ShifterOperand(0x55));
396 __ mvns(R0, ShifterOperand(0x55));
397
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100398 __ adds(R0, R1, ShifterOperand(5));
399 __ subs(R0, R1, ShifterOperand(5));
400
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000401 EmitAndCheck(&assembler, "DataProcessingImmediate");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700402}
403
Vladimir Marko93205e32016-04-13 11:59:46 +0100404TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700405 __ mov(R0, ShifterOperand(0x550055));
406 __ mvn(R0, ShifterOperand(0x550055));
407 __ add(R0, R1, ShifterOperand(0x550055));
408 __ sub(R0, R1, ShifterOperand(0x550055));
409 __ and_(R0, R1, ShifterOperand(0x550055));
410 __ orr(R0, R1, ShifterOperand(0x550055));
Vladimir Markod2b4ca22015-09-14 15:13:26 +0100411 __ orn(R0, R1, ShifterOperand(0x550055));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700412 __ eor(R0, R1, ShifterOperand(0x550055));
413 __ bic(R0, R1, ShifterOperand(0x550055));
414 __ adc(R0, R1, ShifterOperand(0x550055));
415 __ sbc(R0, R1, ShifterOperand(0x550055));
416 __ rsb(R0, R1, ShifterOperand(0x550055));
417
418 __ tst(R0, ShifterOperand(0x550055));
419 __ teq(R0, ShifterOperand(0x550055));
420 __ cmp(R0, ShifterOperand(0x550055));
421 __ cmn(R0, ShifterOperand(0x550055));
422
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000423 EmitAndCheck(&assembler, "DataProcessingModifiedImmediate");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700424}
425
426
Vladimir Marko93205e32016-04-13 11:59:46 +0100427TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700428 __ mov(R0, ShifterOperand(0x550055));
429 __ mov(R0, ShifterOperand(0x55005500));
430 __ mov(R0, ShifterOperand(0x55555555));
431 __ mov(R0, ShifterOperand(0xd5000000)); // rotated to first position
432 __ mov(R0, ShifterOperand(0x6a000000)); // rotated to second position
433 __ mov(R0, ShifterOperand(0x350)); // rotated to 2nd last position
434 __ mov(R0, ShifterOperand(0x1a8)); // rotated to last position
435
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000436 EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700437}
438
Vladimir Marko93205e32016-04-13 11:59:46 +0100439TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100440 // 16-bit variants.
441 __ movs(R3, ShifterOperand(R4, LSL, 4));
442 __ movs(R3, ShifterOperand(R4, LSR, 5));
443 __ movs(R3, ShifterOperand(R4, ASR, 6));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700444
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100445 // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
446 __ movs(R3, ShifterOperand(R4, ROR, 7));
447
448 // 32-bit RRX because RRX has no 16-bit version.
449 __ movs(R3, ShifterOperand(R4, RRX));
450
451 // 32 bit variants (not setting condition codes).
452 __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
453 __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
454 __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
455 __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
456 __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);
457
458 // 32 bit variants (high registers).
459 __ movs(R8, ShifterOperand(R4, LSL, 4));
460 __ movs(R8, ShifterOperand(R4, LSR, 5));
461 __ movs(R8, ShifterOperand(R4, ASR, 6));
462 __ movs(R8, ShifterOperand(R4, ROR, 7));
463 __ movs(R8, ShifterOperand(R4, RRX));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700464
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000465 EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700466}
467
Vladimir Marko93205e32016-04-13 11:59:46 +0100468TEST_F(Thumb2AssemblerTest, ShiftImmediate) {
Vladimir Markof9d741e2015-11-20 15:08:11 +0000469 // Note: This test produces the same results as DataProcessingShiftedRegister
470 // but it does so using shift functions instead of mov().
Vladimir Markof9d741e2015-11-20 15:08:11 +0000471
472 // 16-bit variants.
473 __ Lsl(R3, R4, 4);
474 __ Lsr(R3, R4, 5);
475 __ Asr(R3, R4, 6);
476
477 // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
478 __ Ror(R3, R4, 7);
479
480 // 32-bit RRX because RRX has no 16-bit version.
481 __ Rrx(R3, R4);
482
483 // 32 bit variants (not setting condition codes).
484 __ Lsl(R3, R4, 4, AL, kCcKeep);
485 __ Lsr(R3, R4, 5, AL, kCcKeep);
486 __ Asr(R3, R4, 6, AL, kCcKeep);
487 __ Ror(R3, R4, 7, AL, kCcKeep);
488 __ Rrx(R3, R4, AL, kCcKeep);
489
490 // 32 bit variants (high registers).
491 __ Lsls(R8, R4, 4);
492 __ Lsrs(R8, R4, 5);
493 __ Asrs(R8, R4, 6);
494 __ Rors(R8, R4, 7);
495 __ Rrxs(R8, R4);
496
497 EmitAndCheck(&assembler, "ShiftImmediate");
498}
Dave Allison65fcc2c2014-04-28 13:45:27 -0700499
Vladimir Marko93205e32016-04-13 11:59:46 +0100500TEST_F(Thumb2AssemblerTest, BasicLoad) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700501 __ ldr(R3, Address(R4, 24));
502 __ ldrb(R3, Address(R4, 24));
503 __ ldrh(R3, Address(R4, 24));
504 __ ldrsb(R3, Address(R4, 24));
505 __ ldrsh(R3, Address(R4, 24));
506
507 __ ldr(R3, Address(SP, 24));
508
509 // 32 bit variants
510 __ ldr(R8, Address(R4, 24));
511 __ ldrb(R8, Address(R4, 24));
512 __ ldrh(R8, Address(R4, 24));
513 __ ldrsb(R8, Address(R4, 24));
514 __ ldrsh(R8, Address(R4, 24));
515
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000516 EmitAndCheck(&assembler, "BasicLoad");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700517}
518
519
Vladimir Marko93205e32016-04-13 11:59:46 +0100520TEST_F(Thumb2AssemblerTest, BasicStore) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700521 __ str(R3, Address(R4, 24));
522 __ strb(R3, Address(R4, 24));
523 __ strh(R3, Address(R4, 24));
524
525 __ str(R3, Address(SP, 24));
526
527 // 32 bit variants.
528 __ str(R8, Address(R4, 24));
529 __ strb(R8, Address(R4, 24));
530 __ strh(R8, Address(R4, 24));
531
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000532 EmitAndCheck(&assembler, "BasicStore");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700533}
534
Vladimir Marko93205e32016-04-13 11:59:46 +0100535TEST_F(Thumb2AssemblerTest, ComplexLoad) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700536 __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
537 __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
538 __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
539 __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
540 __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
541 __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));
542
543 __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
544 __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
545 __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
546 __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
547 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
548 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
549
550 __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
551 __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
552 __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
553 __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
554 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
555 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
556
557 __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
558 __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
559 __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
560 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
561 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
562 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
563
564 __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
565 __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
566 __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
567 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
568 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
569 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
570
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000571 EmitAndCheck(&assembler, "ComplexLoad");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700572}
573
574
Vladimir Marko93205e32016-04-13 11:59:46 +0100575TEST_F(Thumb2AssemblerTest, ComplexStore) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700576 __ str(R3, Address(R4, 24, Address::Mode::Offset));
577 __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
578 __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
579 __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
580 __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
581 __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));
582
583 __ strb(R3, Address(R4, 24, Address::Mode::Offset));
584 __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
585 __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
586 __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
587 __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
588 __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
589
590 __ strh(R3, Address(R4, 24, Address::Mode::Offset));
591 __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
592 __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
593 __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
594 __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
595 __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
596
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000597 EmitAndCheck(&assembler, "ComplexStore");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700598}
599
Vladimir Marko93205e32016-04-13 11:59:46 +0100600TEST_F(Thumb2AssemblerTest, NegativeLoadStore) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700601 __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
602 __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
603 __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
604 __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
605 __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
606 __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
607
608 __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
609 __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
610 __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
611 __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
612 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
613 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
614
615 __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
616 __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
617 __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
618 __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
619 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
620 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
621
622 __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
623 __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
624 __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
625 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
626 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
627 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
628
629 __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
630 __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
631 __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
632 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
633 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
634 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
635
636 __ str(R3, Address(R4, -24, Address::Mode::Offset));
637 __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
638 __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
639 __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
640 __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
641 __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
642
643 __ strb(R3, Address(R4, -24, Address::Mode::Offset));
644 __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
645 __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
646 __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
647 __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
648 __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
649
650 __ strh(R3, Address(R4, -24, Address::Mode::Offset));
651 __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
652 __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
653 __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
654 __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
655 __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
656
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000657 EmitAndCheck(&assembler, "NegativeLoadStore");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700658}
659
Vladimir Marko93205e32016-04-13 11:59:46 +0100660TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700661 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
662 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
663
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000664 EmitAndCheck(&assembler, "SimpleLoadStoreDual");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700665}
666
Vladimir Marko93205e32016-04-13 11:59:46 +0100667TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700668 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
669 __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
670 __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
671 __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
672 __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
673 __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
674
675 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
676 __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
677 __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
678 __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
679 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
680 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
681
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000682 EmitAndCheck(&assembler, "ComplexLoadStoreDual");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700683}
684
Vladimir Marko93205e32016-04-13 11:59:46 +0100685TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700686 __ strd(R2, Address(R0, -24, Address::Mode::Offset));
687 __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
688 __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
689 __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
690 __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
691 __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
692
693 __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
694 __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
695 __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
696 __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
697 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
698 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
699
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000700 EmitAndCheck(&assembler, "NegativeLoadStoreDual");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700701}
702
Vladimir Marko93205e32016-04-13 11:59:46 +0100703TEST_F(Thumb2AssemblerTest, SimpleBranch) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700704 Label l1;
705 __ mov(R0, ShifterOperand(2));
706 __ Bind(&l1);
707 __ mov(R1, ShifterOperand(1));
708 __ b(&l1);
709 Label l2;
710 __ b(&l2);
711 __ mov(R1, ShifterOperand(2));
712 __ Bind(&l2);
713 __ mov(R0, ShifterOperand(3));
714
715 Label l3;
716 __ mov(R0, ShifterOperand(2));
717 __ Bind(&l3);
718 __ mov(R1, ShifterOperand(1));
719 __ b(&l3, EQ);
720
721 Label l4;
722 __ b(&l4, EQ);
723 __ mov(R1, ShifterOperand(2));
724 __ Bind(&l4);
725 __ mov(R0, ShifterOperand(3));
726
727 // 2 linked labels.
728 Label l5;
729 __ b(&l5);
730 __ mov(R1, ShifterOperand(4));
731 __ b(&l5);
732 __ mov(R1, ShifterOperand(5));
733 __ Bind(&l5);
734 __ mov(R0, ShifterOperand(6));
735
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000736 EmitAndCheck(&assembler, "SimpleBranch");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700737}
738
Vladimir Marko93205e32016-04-13 11:59:46 +0100739TEST_F(Thumb2AssemblerTest, LongBranch) {
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000740 __ Force32Bit();
Dave Allison65fcc2c2014-04-28 13:45:27 -0700741 // 32 bit branches.
742 Label l1;
743 __ mov(R0, ShifterOperand(2));
744 __ Bind(&l1);
745 __ mov(R1, ShifterOperand(1));
746 __ b(&l1);
747
748 Label l2;
749 __ b(&l2);
750 __ mov(R1, ShifterOperand(2));
751 __ Bind(&l2);
752 __ mov(R0, ShifterOperand(3));
753
754 Label l3;
755 __ mov(R0, ShifterOperand(2));
756 __ Bind(&l3);
757 __ mov(R1, ShifterOperand(1));
758 __ b(&l3, EQ);
759
760 Label l4;
761 __ b(&l4, EQ);
762 __ mov(R1, ShifterOperand(2));
763 __ Bind(&l4);
764 __ mov(R0, ShifterOperand(3));
765
766 // 2 linked labels.
767 Label l5;
768 __ b(&l5);
769 __ mov(R1, ShifterOperand(4));
770 __ b(&l5);
771 __ mov(R1, ShifterOperand(5));
772 __ Bind(&l5);
773 __ mov(R0, ShifterOperand(6));
774
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000775 EmitAndCheck(&assembler, "LongBranch");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700776}
777
Vladimir Marko93205e32016-04-13 11:59:46 +0100778TEST_F(Thumb2AssemblerTest, LoadMultiple) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700779 // 16 bit.
780 __ ldm(DB_W, R4, (1 << R0 | 1 << R3));
781
782 // 32 bit.
783 __ ldm(DB_W, R4, (1 << LR | 1 << R11));
784 __ ldm(DB, R4, (1 << LR | 1 << R11));
785
786 // Single reg is converted to ldr
787 __ ldm(DB_W, R4, (1 << R5));
788
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000789 EmitAndCheck(&assembler, "LoadMultiple");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700790}
791
Vladimir Marko93205e32016-04-13 11:59:46 +0100792TEST_F(Thumb2AssemblerTest, StoreMultiple) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700793 // 16 bit.
794 __ stm(IA_W, R4, (1 << R0 | 1 << R3));
795
796 // 32 bit.
797 __ stm(IA_W, R4, (1 << LR | 1 << R11));
798 __ stm(IA, R4, (1 << LR | 1 << R11));
799
800 // Single reg is converted to str
801 __ stm(IA_W, R4, (1 << R5));
802 __ stm(IA, R4, (1 << R5));
803
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000804 EmitAndCheck(&assembler, "StoreMultiple");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700805}
806
Vladimir Marko93205e32016-04-13 11:59:46 +0100807TEST_F(Thumb2AssemblerTest, MovWMovT) {
Vladimir Markob4536b72015-11-24 13:45:23 +0000808 // Always 32 bit.
809 __ movw(R4, 0);
810 __ movw(R4, 0x34);
811 __ movw(R9, 0x34);
812 __ movw(R3, 0x1234);
813 __ movw(R9, 0xffff);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700814
815 // Always 32 bit.
816 __ movt(R0, 0);
817 __ movt(R0, 0x1234);
818 __ movt(R1, 0xffff);
819
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000820 EmitAndCheck(&assembler, "MovWMovT");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700821}
822
Vladimir Marko93205e32016-04-13 11:59:46 +0100823TEST_F(Thumb2AssemblerTest, SpecialAddSub) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700824 __ add(R2, SP, ShifterOperand(0x50)); // 16 bit.
825 __ add(SP, SP, ShifterOperand(0x50)); // 16 bit.
826 __ add(R8, SP, ShifterOperand(0x50)); // 32 bit.
827
828 __ add(R2, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
829 __ add(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000830 __ add(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700831
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000832 __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit
833 __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit
834 __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700835
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000836 __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size
837 __ sub(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700838
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000839 EmitAndCheck(&assembler, "SpecialAddSub");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700840}
841
Vladimir Marko93205e32016-04-13 11:59:46 +0100842TEST_F(Thumb2AssemblerTest, LoadFromOffset) {
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000843 __ LoadFromOffset(kLoadWord, R2, R4, 12);
844 __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
845 __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
846 __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
847 __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
848 __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
849 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
850 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
851 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
852 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
853 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
854 __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
855 __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
856 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
857 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
858 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
859 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
860 __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
861
862 __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12.
863 __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
864
865 __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
866 __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
867 __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
868
869 EmitAndCheck(&assembler, "LoadFromOffset");
870}
871
Vladimir Marko93205e32016-04-13 11:59:46 +0100872TEST_F(Thumb2AssemblerTest, StoreToOffset) {
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000873 __ StoreToOffset(kStoreWord, R2, R4, 12);
874 __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
875 __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
876 __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
877 __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
878 __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
879 __ StoreToOffset(kStoreHalfword, R2, R4, 12);
880 __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
881 __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
882 __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
883 __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
884 __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
885 __ StoreToOffset(kStoreWordPair, R2, R4, 12);
886 __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
887 __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
888 __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
889 __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
890 __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
891
892 __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12.
893 __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
894
895 __ StoreToOffset(kStoreByte, R2, R4, 12);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700896
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000897 EmitAndCheck(&assembler, "StoreToOffset");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700898}
899
Vladimir Marko93205e32016-04-13 11:59:46 +0100900TEST_F(Thumb2AssemblerTest, IfThen) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700901 __ it(EQ);
902 __ mov(R1, ShifterOperand(1), EQ);
903
904 __ it(EQ, kItThen);
905 __ mov(R1, ShifterOperand(1), EQ);
906 __ mov(R2, ShifterOperand(2), EQ);
907
908 __ it(EQ, kItElse);
909 __ mov(R1, ShifterOperand(1), EQ);
910 __ mov(R2, ShifterOperand(2), NE);
911
912 __ it(EQ, kItThen, kItElse);
913 __ mov(R1, ShifterOperand(1), EQ);
914 __ mov(R2, ShifterOperand(2), EQ);
915 __ mov(R3, ShifterOperand(3), NE);
916
917 __ it(EQ, kItElse, kItElse);
918 __ mov(R1, ShifterOperand(1), EQ);
919 __ mov(R2, ShifterOperand(2), NE);
920 __ mov(R3, ShifterOperand(3), NE);
921
922 __ it(EQ, kItThen, kItThen, kItElse);
923 __ mov(R1, ShifterOperand(1), EQ);
924 __ mov(R2, ShifterOperand(2), EQ);
925 __ mov(R3, ShifterOperand(3), EQ);
926 __ mov(R4, ShifterOperand(4), NE);
927
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000928 EmitAndCheck(&assembler, "IfThen");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700929}
930
Vladimir Marko93205e32016-04-13 11:59:46 +0100931TEST_F(Thumb2AssemblerTest, CbzCbnz) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700932 Label l1;
933 __ cbz(R2, &l1);
934 __ mov(R1, ShifterOperand(3));
935 __ mov(R2, ShifterOperand(3));
936 __ Bind(&l1);
937 __ mov(R2, ShifterOperand(4));
938
939 Label l2;
940 __ cbnz(R2, &l2);
941 __ mov(R8, ShifterOperand(3));
942 __ mov(R2, ShifterOperand(3));
943 __ Bind(&l2);
944 __ mov(R2, ShifterOperand(4));
945
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000946 EmitAndCheck(&assembler, "CbzCbnz");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700947}
948
Vladimir Marko93205e32016-04-13 11:59:46 +0100949TEST_F(Thumb2AssemblerTest, Multiply) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700950 __ mul(R0, R1, R0);
951 __ mul(R0, R1, R2);
952 __ mul(R8, R9, R8);
953 __ mul(R8, R9, R10);
954
955 __ mla(R0, R1, R2, R3);
956 __ mla(R8, R9, R8, R9);
957
958 __ mls(R0, R1, R2, R3);
959 __ mls(R8, R9, R8, R9);
960
961 __ umull(R0, R1, R2, R3);
962 __ umull(R8, R9, R10, R11);
963
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000964 EmitAndCheck(&assembler, "Multiply");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700965}
966
Vladimir Marko93205e32016-04-13 11:59:46 +0100967TEST_F(Thumb2AssemblerTest, Divide) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700968 __ sdiv(R0, R1, R2);
969 __ sdiv(R8, R9, R10);
970
971 __ udiv(R0, R1, R2);
972 __ udiv(R8, R9, R10);
973
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000974 EmitAndCheck(&assembler, "Divide");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700975}
976
Vladimir Marko93205e32016-04-13 11:59:46 +0100977TEST_F(Thumb2AssemblerTest, VMov) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700978 __ vmovs(S1, 1.0);
979 __ vmovd(D1, 1.0);
980
981 __ vmovs(S1, S2);
982 __ vmovd(D1, D2);
983
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000984 EmitAndCheck(&assembler, "VMov");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700985}
986
987
Vladimir Marko93205e32016-04-13 11:59:46 +0100988TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700989 __ vadds(S0, S1, S2);
990 __ vsubs(S0, S1, S2);
991 __ vmuls(S0, S1, S2);
992 __ vmlas(S0, S1, S2);
993 __ vmlss(S0, S1, S2);
994 __ vdivs(S0, S1, S2);
995 __ vabss(S0, S1);
996 __ vnegs(S0, S1);
997 __ vsqrts(S0, S1);
998
999 __ vaddd(D0, D1, D2);
1000 __ vsubd(D0, D1, D2);
1001 __ vmuld(D0, D1, D2);
1002 __ vmlad(D0, D1, D2);
1003 __ vmlsd(D0, D1, D2);
1004 __ vdivd(D0, D1, D2);
1005 __ vabsd(D0, D1);
1006 __ vnegd(D0, D1);
1007 __ vsqrtd(D0, D1);
1008
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001009 EmitAndCheck(&assembler, "BasicFloatingPoint");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001010}
1011
Vladimir Marko93205e32016-04-13 11:59:46 +01001012TEST_F(Thumb2AssemblerTest, FloatingPointConversions) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001013 __ vcvtsd(S2, D2);
1014 __ vcvtds(D2, S2);
1015
1016 __ vcvtis(S1, S2);
1017 __ vcvtsi(S1, S2);
1018
1019 __ vcvtid(S1, D2);
1020 __ vcvtdi(D1, S2);
1021
1022 __ vcvtus(S1, S2);
1023 __ vcvtsu(S1, S2);
1024
1025 __ vcvtud(S1, D2);
1026 __ vcvtdu(D1, S2);
1027
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001028 EmitAndCheck(&assembler, "FloatingPointConversions");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001029}
1030
Vladimir Marko93205e32016-04-13 11:59:46 +01001031TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001032 __ vcmps(S0, S1);
1033 __ vcmpd(D0, D1);
1034
1035 __ vcmpsz(S2);
1036 __ vcmpdz(D2);
1037
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001038 EmitAndCheck(&assembler, "FloatingPointComparisons");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001039}
1040
Vladimir Marko93205e32016-04-13 11:59:46 +01001041TEST_F(Thumb2AssemblerTest, Calls) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001042 __ blx(LR);
1043 __ bx(LR);
1044
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001045 EmitAndCheck(&assembler, "Calls");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001046}
1047
Vladimir Marko93205e32016-04-13 11:59:46 +01001048TEST_F(Thumb2AssemblerTest, Breakpoint) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001049 __ bkpt(0);
1050
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001051 EmitAndCheck(&assembler, "Breakpoint");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001052}
1053
Vladimir Marko93205e32016-04-13 11:59:46 +01001054TEST_F(Thumb2AssemblerTest, StrR1) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001055 __ str(R1, Address(SP, 68));
1056 __ str(R1, Address(SP, 1068));
1057
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001058 EmitAndCheck(&assembler, "StrR1");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001059}
1060
Vladimir Marko93205e32016-04-13 11:59:46 +01001061TEST_F(Thumb2AssemblerTest, VPushPop) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001062 __ vpushs(S2, 4);
1063 __ vpushd(D2, 4);
1064
1065 __ vpops(S2, 4);
1066 __ vpopd(D2, 4);
1067
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001068 EmitAndCheck(&assembler, "VPushPop");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001069}
1070
Vladimir Marko93205e32016-04-13 11:59:46 +01001071TEST_F(Thumb2AssemblerTest, Max16BitBranch) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001072 Label l1;
1073 __ b(&l1);
1074 for (int i = 0 ; i < (1 << 11) ; i += 2) {
1075 __ mov(R3, ShifterOperand(i & 0xff));
1076 }
1077 __ Bind(&l1);
1078 __ mov(R1, ShifterOperand(R2));
1079
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001080 EmitAndCheck(&assembler, "Max16BitBranch");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001081}
1082
Vladimir Marko93205e32016-04-13 11:59:46 +01001083TEST_F(Thumb2AssemblerTest, Branch32) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001084 Label l1;
1085 __ b(&l1);
1086 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1087 __ mov(R3, ShifterOperand(i & 0xff));
1088 }
1089 __ Bind(&l1);
1090 __ mov(R1, ShifterOperand(R2));
1091
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001092 EmitAndCheck(&assembler, "Branch32");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001093}
1094
Vladimir Marko93205e32016-04-13 11:59:46 +01001095TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001096 Label l1;
1097 __ cbz(R4, &l1);
1098 for (int i = 0 ; i < (1 << 7) ; i += 2) {
1099 __ mov(R3, ShifterOperand(i & 0xff));
1100 }
1101 __ Bind(&l1);
1102 __ mov(R1, ShifterOperand(R2));
1103
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001104 EmitAndCheck(&assembler, "CompareAndBranchMax");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001105}
1106
Vladimir Marko93205e32016-04-13 11:59:46 +01001107TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001108 Label l1;
1109 __ cbz(R4, &l1);
1110 for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
1111 __ mov(R3, ShifterOperand(i & 0xff));
1112 }
1113 __ Bind(&l1);
1114 __ mov(R1, ShifterOperand(R2));
1115
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001116 EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001117}
1118
Vladimir Marko93205e32016-04-13 11:59:46 +01001119TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001120 Label l1;
1121 __ cbz(R4, &l1);
1122 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1123 __ mov(R3, ShifterOperand(i & 0xff));
1124 }
1125 __ Bind(&l1);
1126 __ mov(R1, ShifterOperand(R2));
1127
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001128 EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001129}
1130
Vladimir Marko93205e32016-04-13 11:59:46 +01001131TEST_F(Thumb2AssemblerTest, MixedBranch32) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001132 Label l1;
1133 Label l2;
1134 __ b(&l1); // Forwards.
1135 __ Bind(&l2);
1136
1137 // Space to force relocation.
1138 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1139 __ mov(R3, ShifterOperand(i & 0xff));
1140 }
1141 __ b(&l2); // Backwards.
1142 __ Bind(&l1);
1143 __ mov(R1, ShifterOperand(R2));
1144
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001145 EmitAndCheck(&assembler, "MixedBranch32");
Dave Allison65fcc2c2014-04-28 13:45:27 -07001146}
1147
Vladimir Marko93205e32016-04-13 11:59:46 +01001148TEST_F(Thumb2AssemblerTest, Shifts) {
Vladimir Marko73cf0fb2015-07-30 15:07:22 +01001149 // 16 bit selected for CcDontCare.
Dave Allison45fdb932014-06-25 12:37:10 -07001150 __ Lsl(R0, R1, 5);
1151 __ Lsr(R0, R1, 5);
1152 __ Asr(R0, R1, 5);
1153
1154 __ Lsl(R0, R0, R1);
1155 __ Lsr(R0, R0, R1);
1156 __ Asr(R0, R0, R1);
Vladimir Marko73cf0fb2015-07-30 15:07:22 +01001157 __ Ror(R0, R0, R1);
1158
1159 // 16 bit with kCcSet.
1160 __ Lsls(R0, R1, 5);
1161 __ Lsrs(R0, R1, 5);
1162 __ Asrs(R0, R1, 5);
1163
1164 __ Lsls(R0, R0, R1);
1165 __ Lsrs(R0, R0, R1);
1166 __ Asrs(R0, R0, R1);
1167 __ Rors(R0, R0, R1);
1168
1169 // 32-bit with kCcKeep.
1170 __ Lsl(R0, R1, 5, AL, kCcKeep);
1171 __ Lsr(R0, R1, 5, AL, kCcKeep);
1172 __ Asr(R0, R1, 5, AL, kCcKeep);
1173
1174 __ Lsl(R0, R0, R1, AL, kCcKeep);
1175 __ Lsr(R0, R0, R1, AL, kCcKeep);
1176 __ Asr(R0, R0, R1, AL, kCcKeep);
1177 __ Ror(R0, R0, R1, AL, kCcKeep);
1178
1179 // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
1180 __ Ror(R0, R1, 5);
1181 __ Rors(R0, R1, 5);
1182 __ Ror(R0, R1, 5, AL, kCcKeep);
Dave Allison45fdb932014-06-25 12:37:10 -07001183
1184 // 32 bit due to high registers.
1185 __ Lsl(R8, R1, 5);
1186 __ Lsr(R0, R8, 5);
1187 __ Asr(R8, R1, 5);
1188 __ Ror(R0, R8, 5);
1189
1190 // 32 bit due to different Rd and Rn.
1191 __ Lsl(R0, R1, R2);
1192 __ Lsr(R0, R1, R2);
1193 __ Asr(R0, R1, R2);
1194 __ Ror(R0, R1, R2);
1195
1196 // 32 bit due to use of high registers.
1197 __ Lsl(R8, R1, R2);
1198 __ Lsr(R0, R8, R2);
1199 __ Asr(R0, R1, R8);
1200
1201 // S bit (all 32 bit)
1202
1203 // 32 bit due to high registers.
Vladimir Marko73cf0fb2015-07-30 15:07:22 +01001204 __ Lsls(R8, R1, 5);
1205 __ Lsrs(R0, R8, 5);
1206 __ Asrs(R8, R1, 5);
1207 __ Rors(R0, R8, 5);
Dave Allison45fdb932014-06-25 12:37:10 -07001208
1209 // 32 bit due to different Rd and Rn.
Vladimir Marko73cf0fb2015-07-30 15:07:22 +01001210 __ Lsls(R0, R1, R2);
1211 __ Lsrs(R0, R1, R2);
1212 __ Asrs(R0, R1, R2);
1213 __ Rors(R0, R1, R2);
Dave Allison45fdb932014-06-25 12:37:10 -07001214
1215 // 32 bit due to use of high registers.
Vladimir Marko73cf0fb2015-07-30 15:07:22 +01001216 __ Lsls(R8, R1, R2);
1217 __ Lsrs(R0, R8, R2);
1218 __ Asrs(R0, R1, R8);
Dave Allison45fdb932014-06-25 12:37:10 -07001219
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001220 EmitAndCheck(&assembler, "Shifts");
Dave Allison45fdb932014-06-25 12:37:10 -07001221}
1222
Vladimir Marko93205e32016-04-13 11:59:46 +01001223TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) {
Dave Allison45fdb932014-06-25 12:37:10 -07001224 // 16 bit.
1225 __ ldr(R0, Address(R1, R2));
1226 __ str(R0, Address(R1, R2));
1227
1228 // 32 bit due to shift.
1229 __ ldr(R0, Address(R1, R2, LSL, 1));
1230 __ str(R0, Address(R1, R2, LSL, 1));
1231
1232 __ ldr(R0, Address(R1, R2, LSL, 3));
1233 __ str(R0, Address(R1, R2, LSL, 3));
1234
1235 // 32 bit due to high register use.
1236 __ ldr(R8, Address(R1, R2));
1237 __ str(R8, Address(R1, R2));
1238
1239 __ ldr(R1, Address(R8, R2));
1240 __ str(R2, Address(R8, R2));
1241
1242 __ ldr(R0, Address(R1, R8));
1243 __ str(R0, Address(R1, R8));
1244
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001245 EmitAndCheck(&assembler, "LoadStoreRegOffset");
Dave Allison45fdb932014-06-25 12:37:10 -07001246}
1247
Vladimir Marko93205e32016-04-13 11:59:46 +01001248TEST_F(Thumb2AssemblerTest, LoadStoreLiteral) {
Dave Allison45fdb932014-06-25 12:37:10 -07001249 __ ldr(R0, Address(4));
1250 __ str(R0, Address(4));
1251
1252 __ ldr(R0, Address(-8));
1253 __ str(R0, Address(-8));
1254
1255 // Limits.
1256 __ ldr(R0, Address(0x3ff)); // 10 bits (16 bit).
1257 __ ldr(R0, Address(0x7ff)); // 11 bits (32 bit).
1258 __ str(R0, Address(0x3ff)); // 32 bit (no 16 bit str(literal)).
1259 __ str(R0, Address(0x7ff)); // 11 bits (32 bit).
1260
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001261 EmitAndCheck(&assembler, "LoadStoreLiteral");
Dave Allison45fdb932014-06-25 12:37:10 -07001262}
1263
Vladimir Marko93205e32016-04-13 11:59:46 +01001264TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
Dave Allison0bb9ade2014-06-26 17:57:36 -07001265 __ ldr(R0, Address(R4, 124)); // 16 bit.
1266 __ ldr(R0, Address(R4, 128)); // 32 bit.
1267
1268 __ ldrb(R0, Address(R4, 31)); // 16 bit.
1269 __ ldrb(R0, Address(R4, 32)); // 32 bit.
1270
1271 __ ldrh(R0, Address(R4, 62)); // 16 bit.
1272 __ ldrh(R0, Address(R4, 64)); // 32 bit.
1273
1274 __ ldrsb(R0, Address(R4, 31)); // 32 bit.
1275 __ ldrsb(R0, Address(R4, 32)); // 32 bit.
1276
1277 __ ldrsh(R0, Address(R4, 62)); // 32 bit.
1278 __ ldrsh(R0, Address(R4, 64)); // 32 bit.
1279
1280 __ str(R0, Address(R4, 124)); // 16 bit.
1281 __ str(R0, Address(R4, 128)); // 32 bit.
1282
1283 __ strb(R0, Address(R4, 31)); // 16 bit.
1284 __ strb(R0, Address(R4, 32)); // 32 bit.
1285
1286 __ strh(R0, Address(R4, 62)); // 16 bit.
1287 __ strh(R0, Address(R4, 64)); // 32 bit.
1288
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001289 EmitAndCheck(&assembler, "LoadStoreLimits");
Dave Allison0bb9ade2014-06-26 17:57:36 -07001290}
1291
Vladimir Marko93205e32016-04-13 11:59:46 +01001292TEST_F(Thumb2AssemblerTest, CompareAndBranch) {
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001293 Label label;
Nicolas Geoffrayd56376c2015-05-21 12:32:34 +00001294 __ CompareAndBranchIfZero(arm::R0, &label);
1295 __ CompareAndBranchIfZero(arm::R11, &label);
1296 __ CompareAndBranchIfNonZero(arm::R0, &label);
1297 __ CompareAndBranchIfNonZero(arm::R11, &label);
1298 __ Bind(&label);
1299
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001300 EmitAndCheck(&assembler, "CompareAndBranch");
Nicolas Geoffrayd56376c2015-05-21 12:32:34 +00001301}
1302
Vladimir Marko93205e32016-04-13 11:59:46 +01001303TEST_F(Thumb2AssemblerTest, AddConstant) {
Vladimir Markof5c09c32015-12-17 12:08:08 +00001304 // Low registers, Rd != Rn.
1305 __ AddConstant(R0, R1, 0); // MOV.
1306 __ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1.
1307 __ AddConstant(R0, R1, 7); // 16-bit ADDS, encoding T1.
1308 __ AddConstant(R0, R1, 8); // 32-bit ADD, encoding T3.
1309 __ AddConstant(R0, R1, 255); // 32-bit ADD, encoding T3.
1310 __ AddConstant(R0, R1, 256); // 32-bit ADD, encoding T3.
1311 __ AddConstant(R0, R1, 257); // 32-bit ADD, encoding T4.
1312 __ AddConstant(R0, R1, 0xfff); // 32-bit ADD, encoding T4.
1313 __ AddConstant(R0, R1, 0x1000); // 32-bit ADD, encoding T3.
1314 __ AddConstant(R0, R1, 0x1001); // MVN+SUB.
1315 __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
1316 __ AddConstant(R0, R1, 0xffff); // MOVW+ADD.
1317 __ AddConstant(R0, R1, 0x10000); // 32-bit ADD, encoding T3.
1318 __ AddConstant(R0, R1, 0x10001); // 32-bit ADD, encoding T3.
1319 __ AddConstant(R0, R1, 0x10002); // MVN+SUB.
1320 __ AddConstant(R0, R1, 0x10003); // MOVW+MOVT+ADD.
1321 __ AddConstant(R0, R1, -1); // 16-bit SUBS.
1322 __ AddConstant(R0, R1, -7); // 16-bit SUBS.
1323 __ AddConstant(R0, R1, -8); // 32-bit SUB, encoding T3.
1324 __ AddConstant(R0, R1, -255); // 32-bit SUB, encoding T3.
1325 __ AddConstant(R0, R1, -256); // 32-bit SUB, encoding T3.
1326 __ AddConstant(R0, R1, -257); // 32-bit SUB, encoding T4.
1327 __ AddConstant(R0, R1, -0xfff); // 32-bit SUB, encoding T4.
1328 __ AddConstant(R0, R1, -0x1000); // 32-bit SUB, encoding T3.
1329 __ AddConstant(R0, R1, -0x1001); // MVN+ADD.
1330 __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
1331 __ AddConstant(R0, R1, -0xffff); // MOVW+SUB.
1332 __ AddConstant(R0, R1, -0x10000); // 32-bit SUB, encoding T3.
1333 __ AddConstant(R0, R1, -0x10001); // 32-bit SUB, encoding T3.
1334 __ AddConstant(R0, R1, -0x10002); // MVN+ADD.
1335 __ AddConstant(R0, R1, -0x10003); // MOVW+MOVT+ADD.
1336
1337 // Low registers, Rd == Rn.
1338 __ AddConstant(R0, R0, 0); // Nothing.
1339 __ AddConstant(R1, R1, 1); // 16-bit ADDS, encoding T2.
1340 __ AddConstant(R0, R0, 7); // 16-bit ADDS, encoding T2.
1341 __ AddConstant(R1, R1, 8); // 16-bit ADDS, encoding T2.
1342 __ AddConstant(R0, R0, 255); // 16-bit ADDS, encoding T2.
1343 __ AddConstant(R1, R1, 256); // 32-bit ADD, encoding T3.
1344 __ AddConstant(R0, R0, 257); // 32-bit ADD, encoding T4.
1345 __ AddConstant(R1, R1, 0xfff); // 32-bit ADD, encoding T4.
1346 __ AddConstant(R0, R0, 0x1000); // 32-bit ADD, encoding T3.
1347 __ AddConstant(R1, R1, 0x1001); // MVN+SUB.
1348 __ AddConstant(R0, R0, 0x1002); // MOVW+ADD.
1349 __ AddConstant(R1, R1, 0xffff); // MOVW+ADD.
1350 __ AddConstant(R0, R0, 0x10000); // 32-bit ADD, encoding T3.
1351 __ AddConstant(R1, R1, 0x10001); // 32-bit ADD, encoding T3.
1352 __ AddConstant(R0, R0, 0x10002); // MVN+SUB.
1353 __ AddConstant(R1, R1, 0x10003); // MOVW+MOVT+ADD.
1354 __ AddConstant(R0, R0, -1); // 16-bit SUBS, encoding T2.
1355 __ AddConstant(R1, R1, -7); // 16-bit SUBS, encoding T2.
1356 __ AddConstant(R0, R0, -8); // 16-bit SUBS, encoding T2.
1357 __ AddConstant(R1, R1, -255); // 16-bit SUBS, encoding T2.
1358 __ AddConstant(R0, R0, -256); // 32-bit SUB, encoding T3.
1359 __ AddConstant(R1, R1, -257); // 32-bit SUB, encoding T4.
1360 __ AddConstant(R0, R0, -0xfff); // 32-bit SUB, encoding T4.
1361 __ AddConstant(R1, R1, -0x1000); // 32-bit SUB, encoding T3.
1362 __ AddConstant(R0, R0, -0x1001); // MVN+ADD.
1363 __ AddConstant(R1, R1, -0x1002); // MOVW+SUB.
1364 __ AddConstant(R0, R0, -0xffff); // MOVW+SUB.
1365 __ AddConstant(R1, R1, -0x10000); // 32-bit SUB, encoding T3.
1366 __ AddConstant(R0, R0, -0x10001); // 32-bit SUB, encoding T3.
1367 __ AddConstant(R1, R1, -0x10002); // MVN+ADD.
1368 __ AddConstant(R0, R0, -0x10003); // MOVW+MOVT+ADD.
1369
1370 // High registers.
1371 __ AddConstant(R8, R8, 0); // Nothing.
1372 __ AddConstant(R8, R1, 1); // 32-bit ADD, encoding T3.
1373 __ AddConstant(R0, R8, 7); // 32-bit ADD, encoding T3.
1374 __ AddConstant(R8, R8, 8); // 32-bit ADD, encoding T3.
1375 __ AddConstant(R8, R1, 255); // 32-bit ADD, encoding T3.
1376 __ AddConstant(R0, R8, 256); // 32-bit ADD, encoding T3.
1377 __ AddConstant(R8, R8, 257); // 32-bit ADD, encoding T4.
1378 __ AddConstant(R8, R1, 0xfff); // 32-bit ADD, encoding T4.
1379 __ AddConstant(R0, R8, 0x1000); // 32-bit ADD, encoding T3.
1380 __ AddConstant(R8, R8, 0x1001); // MVN+SUB.
1381 __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
1382 __ AddConstant(R0, R8, 0xffff); // MOVW+ADD.
1383 __ AddConstant(R8, R8, 0x10000); // 32-bit ADD, encoding T3.
1384 __ AddConstant(R8, R1, 0x10001); // 32-bit ADD, encoding T3.
1385 __ AddConstant(R0, R8, 0x10002); // MVN+SUB.
1386 __ AddConstant(R0, R8, 0x10003); // MOVW+MOVT+ADD.
1387 __ AddConstant(R8, R8, -1); // 32-bit ADD, encoding T3.
1388 __ AddConstant(R8, R1, -7); // 32-bit SUB, encoding T3.
1389 __ AddConstant(R0, R8, -8); // 32-bit SUB, encoding T3.
1390 __ AddConstant(R8, R8, -255); // 32-bit SUB, encoding T3.
1391 __ AddConstant(R8, R1, -256); // 32-bit SUB, encoding T3.
1392 __ AddConstant(R0, R8, -257); // 32-bit SUB, encoding T4.
1393 __ AddConstant(R8, R8, -0xfff); // 32-bit SUB, encoding T4.
1394 __ AddConstant(R8, R1, -0x1000); // 32-bit SUB, encoding T3.
1395 __ AddConstant(R0, R8, -0x1001); // MVN+ADD.
1396 __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
1397 __ AddConstant(R8, R1, -0xffff); // MOVW+SUB.
1398 __ AddConstant(R0, R8, -0x10000); // 32-bit SUB, encoding T3.
1399 __ AddConstant(R8, R8, -0x10001); // 32-bit SUB, encoding T3.
1400 __ AddConstant(R8, R1, -0x10002); // MVN+SUB.
1401 __ AddConstant(R0, R8, -0x10003); // MOVW+MOVT+ADD.
1402
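  // kCcKeep requires the flags to be preserved, which (outside an IT block) rules out
  // the flag-setting 16-bit ADDS/SUBS forms; kCcSet requires the flags to be updated,
  // which rules out encoding T4 (ADDW/SUBW) since it has no flag-setting variant.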
1403 // Low registers, Rd != Rn, kCcKeep.
1404 __ AddConstant(R0, R1, 0, AL, kCcKeep); // MOV.
1405 __ AddConstant(R0, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1406 __ AddConstant(R0, R1, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
1407 __ AddConstant(R0, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
1408 __ AddConstant(R0, R1, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
1409 __ AddConstant(R0, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
1410 __ AddConstant(R0, R1, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
1411 __ AddConstant(R0, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
1412 __ AddConstant(R0, R1, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1413 __ AddConstant(R0, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
1414 __ AddConstant(R0, R1, 0x1002, AL, kCcKeep); // MOVW+ADD.
1415 __ AddConstant(R0, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
1416 __ AddConstant(R0, R1, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1417 __ AddConstant(R0, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
1418 __ AddConstant(R0, R1, 0x10002, AL, kCcKeep); // MVN+SUB.
1419 __ AddConstant(R0, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1420 __ AddConstant(R0, R1, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1421 __ AddConstant(R0, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
1422 __ AddConstant(R0, R1, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
1423 __ AddConstant(R0, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
1424 __ AddConstant(R0, R1, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
1425 __ AddConstant(R0, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
1426 __ AddConstant(R0, R1, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
1427 __ AddConstant(R0, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1428 __ AddConstant(R0, R1, -0x1001, AL, kCcKeep); // MVN+ADD.
1429 __ AddConstant(R0, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
1430 __ AddConstant(R0, R1, -0xffff, AL, kCcKeep); // MOVW+SUB.
1431 __ AddConstant(R0, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1432 __ AddConstant(R0, R1, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
1433 __ AddConstant(R0, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
1434 __ AddConstant(R0, R1, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1435
1436 // Low registers, Rd == Rn, kCcKeep.
1437 __ AddConstant(R0, R0, 0, AL, kCcKeep); // Nothing.
1438 __ AddConstant(R1, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1439 __ AddConstant(R0, R0, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
1440 __ AddConstant(R1, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
1441 __ AddConstant(R0, R0, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
1442 __ AddConstant(R1, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
1443 __ AddConstant(R0, R0, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
1444 __ AddConstant(R1, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
1445 __ AddConstant(R0, R0, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1446 __ AddConstant(R1, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
1447 __ AddConstant(R0, R0, 0x1002, AL, kCcKeep); // MOVW+ADD.
1448 __ AddConstant(R1, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
1449 __ AddConstant(R0, R0, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1450 __ AddConstant(R1, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
1451 __ AddConstant(R0, R0, 0x10002, AL, kCcKeep); // MVN+SUB.
1452 __ AddConstant(R1, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1453 __ AddConstant(R0, R0, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1454 __ AddConstant(R1, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
1455 __ AddConstant(R0, R0, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
1456 __ AddConstant(R1, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
1457 __ AddConstant(R0, R0, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
1458 __ AddConstant(R1, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
1459 __ AddConstant(R0, R0, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
1460 __ AddConstant(R1, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1461 __ AddConstant(R0, R0, -0x1001, AL, kCcKeep); // MVN+ADD.
1462 __ AddConstant(R1, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
1463 __ AddConstant(R0, R0, -0xffff, AL, kCcKeep); // MOVW+SUB.
1464 __ AddConstant(R1, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1465 __ AddConstant(R0, R0, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
1466 __ AddConstant(R1, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
1467 __ AddConstant(R0, R0, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1468
1469 // Low registers, Rd != Rn, kCcSet.
1470 __ AddConstant(R0, R1, 0, AL, kCcSet); // 16-bit ADDS.
1471 __ AddConstant(R0, R1, 1, AL, kCcSet); // 16-bit ADDS.
1472 __ AddConstant(R0, R1, 7, AL, kCcSet); // 16-bit ADDS.
1473 __ AddConstant(R0, R1, 8, AL, kCcSet); // 32-bit ADDS, encoding T3.
1474 __ AddConstant(R0, R1, 255, AL, kCcSet); // 32-bit ADDS, encoding T3.
1475 __ AddConstant(R0, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
1476 __ AddConstant(R0, R1, 257, AL, kCcSet); // MVN+SUBS.
1477 __ AddConstant(R0, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
1478 __ AddConstant(R0, R1, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
1479 __ AddConstant(R0, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
1480 __ AddConstant(R0, R1, 0x1002, AL, kCcSet); // MOVW+ADDS.
1481 __ AddConstant(R0, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
1482 __ AddConstant(R0, R1, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
1483 __ AddConstant(R0, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
1484 __ AddConstant(R0, R1, 0x10002, AL, kCcSet); // MVN+SUBS.
1485 __ AddConstant(R0, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
1486 __ AddConstant(R0, R1, -1, AL, kCcSet); // 16-bit SUBS.
1487 __ AddConstant(R0, R1, -7, AL, kCcSet); // 16-bit SUBS.
1488 __ AddConstant(R0, R1, -8, AL, kCcSet); // 32-bit SUBS, encoding T3.
1489 __ AddConstant(R0, R1, -255, AL, kCcSet); // 32-bit SUBS, encoding T3.
1490 __ AddConstant(R0, R1, -256, AL, kCcSet); // 32-bit SUBS, encoding T3.
1491 __ AddConstant(R0, R1, -257, AL, kCcSet); // MVN+ADDS.
1492 __ AddConstant(R0, R1, -0xfff, AL, kCcSet); // MOVW+SUBS.
1493 __ AddConstant(R0, R1, -0x1000, AL, kCcSet); // 32-bit SUBS, encoding T3.
1494 __ AddConstant(R0, R1, -0x1001, AL, kCcSet); // MVN+ADDS.
1495 __ AddConstant(R0, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
1496 __ AddConstant(R0, R1, -0xffff, AL, kCcSet); // MOVW+SUBS.
1497 __ AddConstant(R0, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
1498 __ AddConstant(R0, R1, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
1499 __ AddConstant(R0, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
1500 __ AddConstant(R0, R1, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
1501
1502 // Low registers, Rd == Rn, kCcSet.
1503 __ AddConstant(R0, R0, 0, AL, kCcSet); // 16-bit ADDS, encoding T2.
1504 __ AddConstant(R1, R1, 1, AL, kCcSet); // 16-bit ADDS, encoding T2.
1505 __ AddConstant(R0, R0, 7, AL, kCcSet); // 16-bit ADDS, encoding T2.
1506 __ AddConstant(R1, R1, 8, AL, kCcSet); // 16-bit ADDS, encoding T2.
1507 __ AddConstant(R0, R0, 255, AL, kCcSet); // 16-bit ADDS, encoding T2.
1508 __ AddConstant(R1, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
1509 __ AddConstant(R0, R0, 257, AL, kCcSet); // MVN+SUBS.
1510 __ AddConstant(R1, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
1511 __ AddConstant(R0, R0, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
1512 __ AddConstant(R1, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
1513 __ AddConstant(R0, R0, 0x1002, AL, kCcSet); // MOVW+ADDS.
1514 __ AddConstant(R1, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
1515 __ AddConstant(R0, R0, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
1516 __ AddConstant(R1, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
1517 __ AddConstant(R0, R0, 0x10002, AL, kCcSet); // MVN+SUBS.
1518 __ AddConstant(R1, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
1519 __ AddConstant(R0, R0, -1, AL, kCcSet); // 16-bit SUBS, encoding T2.
1520 __ AddConstant(R1, R1, -7, AL, kCcSet); // 16-bit SUBS, encoding T2.
1521 __ AddConstant(R0, R0, -8, AL, kCcSet); // 16-bit SUBS, encoding T2.
1522 __ AddConstant(R1, R1, -255, AL, kCcSet); // 16-bit SUBS, encoding T2.
1523 __ AddConstant(R0, R0, -256, AL, kCcSet); // 32-bit SUBS, encoding T3.
1524 __ AddConstant(R1, R1, -257, AL, kCcSet); // MVN+ADDS.
1525 __ AddConstant(R0, R0, -0xfff, AL, kCcSet); // MOVW+SUBS.
1526 __ AddConstant(R1, R1, -0x1000, AL, kCcSet); // 32-bit SUBS, encoding T3.
1527 __ AddConstant(R0, R0, -0x1001, AL, kCcSet); // MVN+ADDS.
1528 __ AddConstant(R1, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
1529 __ AddConstant(R0, R0, -0xffff, AL, kCcSet); // MOVW+SUBS.
1530 __ AddConstant(R1, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
1531 __ AddConstant(R0, R0, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
1532 __ AddConstant(R1, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
1533 __ AddConstant(R0, R0, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
1534
1535 __ it(EQ);
1536 __ AddConstant(R0, R1, 1, EQ, kCcSet); // 32-bit ADDS, encoding T3.
1537 __ it(NE);
1538 __ AddConstant(R0, R1, 1, NE, kCcKeep); // 16-bit ADDS, encoding T1.
1539 __ it(GE);
1540 __ AddConstant(R0, R0, 1, GE, kCcSet); // 32-bit ADDS, encoding T3.
1541 __ it(LE);
1542 __ AddConstant(R0, R0, 1, LE, kCcKeep); // 16-bit ADDS, encoding T2.
1543
1544 EmitAndCheck(&assembler, "AddConstant");
1545}
1546
Vladimir Marko93205e32016-04-13 11:59:46 +01001547TEST_F(Thumb2AssemblerTest, CmpConstant) {
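  // As in AddConstant above, the annotations describe the expected expansion: small and
  // modified immediates use CMP directly, most negative constants flip into CMN with the
  // negated value, and anything else is materialized with MOVW, MOVW+MOVT or MVN and
  // compared register-to-register.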
Vladimir Markoac6ac102015-12-17 12:14:00 +00001548 __ CmpConstant(R0, 0); // 16-bit CMP.
1549 __ CmpConstant(R1, 1); // 16-bit CMP.
1550 __ CmpConstant(R0, 7); // 16-bit CMP.
1551 __ CmpConstant(R1, 8); // 16-bit CMP.
1552 __ CmpConstant(R0, 255); // 16-bit CMP.
1553 __ CmpConstant(R1, 256); // 32-bit CMP.
1554 __ CmpConstant(R0, 257); // MVN+CMN.
1555 __ CmpConstant(R1, 0xfff); // MOVW+CMP.
1556 __ CmpConstant(R0, 0x1000); // 32-bit CMP.
1557 __ CmpConstant(R1, 0x1001); // MVN+CMN.
1558 __ CmpConstant(R0, 0x1002); // MOVW+CMP.
1559 __ CmpConstant(R1, 0xffff); // MOVW+CMP.
1560 __ CmpConstant(R0, 0x10000); // 32-bit CMP.
1561 __ CmpConstant(R1, 0x10001); // 32-bit CMP.
1562 __ CmpConstant(R0, 0x10002); // MVN+CMN.
1563 __ CmpConstant(R1, 0x10003); // MOVW+MOVT+CMP.
1564 __ CmpConstant(R0, -1); // 32-bit CMP.
1565 __ CmpConstant(R1, -7); // CMN.
1566 __ CmpConstant(R0, -8); // CMN.
1567 __ CmpConstant(R1, -255); // CMN.
1568 __ CmpConstant(R0, -256); // CMN.
1569 __ CmpConstant(R1, -257); // MVN+CMP.
1570 __ CmpConstant(R0, -0xfff); // MOVW+CMN.
1571 __ CmpConstant(R1, -0x1000); // CMN.
1572 __ CmpConstant(R0, -0x1001); // MVN+CMP.
1573 __ CmpConstant(R1, -0x1002); // MOVW+CMN.
1574 __ CmpConstant(R0, -0xffff); // MOVW+CMN.
1575 __ CmpConstant(R1, -0x10000); // CMN.
1576 __ CmpConstant(R0, -0x10001); // CMN.
1577 __ CmpConstant(R1, -0x10002); // MVN+CMP.
1578 __ CmpConstant(R0, -0x10003); // MOVW+MOVT+CMP.
1579
1580 __ CmpConstant(R8, 0); // 32-bit CMP.
1581 __ CmpConstant(R9, 1); // 32-bit CMP.
1582 __ CmpConstant(R8, 7); // 32-bit CMP.
1583 __ CmpConstant(R9, 8); // 32-bit CMP.
1584 __ CmpConstant(R8, 255); // 32-bit CMP.
1585 __ CmpConstant(R9, 256); // 32-bit CMP.
1586 __ CmpConstant(R8, 257); // MVN+CMN.
1587 __ CmpConstant(R9, 0xfff); // MOVW+CMP.
1588 __ CmpConstant(R8, 0x1000); // 32-bit CMP.
1589 __ CmpConstant(R9, 0x1001); // MVN+CMN.
1590 __ CmpConstant(R8, 0x1002); // MOVW+CMP.
1591 __ CmpConstant(R9, 0xffff); // MOVW+CMP.
1592 __ CmpConstant(R8, 0x10000); // 32-bit CMP.
1593 __ CmpConstant(R9, 0x10001); // 32-bit CMP.
1594 __ CmpConstant(R8, 0x10002); // MVN+CMN.
1595 __ CmpConstant(R9, 0x10003); // MOVW+MOVT+CMP.
1596 __ CmpConstant(R8, -1); // 32-bit CMP.
1597 __ CmpConstant(R9, -7); // CMN.
1598 __ CmpConstant(R8, -8); // CMN.
1599 __ CmpConstant(R9, -255); // CMN.
1600 __ CmpConstant(R8, -256); // CMN.
1601 __ CmpConstant(R9, -257); // MVN+CMP.
1602 __ CmpConstant(R8, -0xfff); // MOVW+CMN.
1603 __ CmpConstant(R9, -0x1000); // CMN.
1604 __ CmpConstant(R8, -0x1001); // MVN+CMP.
1605 __ CmpConstant(R9, -0x1002); // MOVW+CMN.
1606 __ CmpConstant(R8, -0xffff); // MOVW+CMN.
1607 __ CmpConstant(R9, -0x10000); // CMN.
1608 __ CmpConstant(R8, -0x10001); // CMN.
1609 __ CmpConstant(R9, -0x10002); // MVN+CMP.
1610 __ CmpConstant(R8, -0x10003); // MOVW+MOVT+CMP.
1611
1612 EmitAndCheck(&assembler, "CmpConstant");
1613}
1614
Artem Serov12e097c2016-08-08 15:13:26 +01001615#define ENABLE_VIXL_TEST
1616
1617#ifdef ENABLE_VIXL_TEST
1618
1619#define ARM_VIXL
1620
1621#ifdef ARM_VIXL
1622typedef arm::ArmVIXLJNIMacroAssembler JniAssemblerType;
1623#else
1624typedef arm::Thumb2Assembler JniAssemblerType;
1625#endif
1626
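// Minimal fixture for the VIXL-backed tests below: each test gets a fresh arena-backed
// JNI macro assembler.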
Vladimir Marko0e851e22016-08-25 18:17:56 +01001627class ArmVIXLAssemblerTest : public ::testing::Test {
Artem Serov12e097c2016-08-08 15:13:26 +01001628 public:
Vladimir Marko0e851e22016-08-25 18:17:56 +01001629 ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
Artem Serov12e097c2016-08-08 15:13:26 +01001630
1631 ArenaPool pool;
1632 ArenaAllocator arena;
1633 JniAssemblerType assembler;
1634};
1635
Dave Allison65fcc2c2014-04-28 13:45:27 -07001636#undef __
Artem Serov12e097c2016-08-08 15:13:26 +01001637#define __ assembler->
1638
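// JNI-assembler flavour of the EmitAndCheck helpers: finalize the generated code into a
// buffer and pass it to DumpAndCheck, which compares it against the expected output
// registered for the named test.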
1639void EmitAndCheck(JniAssemblerType* assembler, const char* testname,
1640 const char* const* results) {
1641 __ FinalizeCode();
1642 size_t cs = __ CodeSize();
1643 std::vector<uint8_t> managed_code(cs);
1644 MemoryRegion code(&managed_code[0], managed_code.size());
1645 __ FinalizeInstructions(code);
1646
1647 DumpAndCheck(managed_code, testname, results);
1648}
1649
1650void EmitAndCheck(JniAssemblerType* assembler, const char* testname) {
1651 InitResults();
1652 std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
1653 ASSERT_NE(results, test_results.end());
1654
1655 EmitAndCheck(assembler, testname, results->second);
1656}
1657
1658#undef __
1659#define __ assembler.
1660
Vladimir Marko0e851e22016-08-25 18:17:56 +01001661TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
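  // Exercises the entry points of the VIXL-based JNI macro assembler roughly in the
  // order the JNI compiler would use them: build a frame for a simple static method,
  // then loads, stores, copies, handle-scope entries, an exception poll, and finally
  // frame teardown, so the expected output covers every helper called below.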
Artem Serov12e097c2016-08-08 15:13:26 +01001662 const bool is_static = true;
1663 const bool is_synchronized = false;
Igor Murashkin367f3dd2016-09-01 17:00:24 -07001664 const bool is_critical_native = false;
Artem Serov12e097c2016-08-08 15:13:26 +01001665 const char* shorty = "IIFII";
1666
1667 ArenaPool pool;
1668 ArenaAllocator arena(&pool);
1669
1670 std::unique_ptr<JniCallingConvention> jni_conv(
Igor Murashkin367f3dd2016-09-01 17:00:24 -07001671 JniCallingConvention::Create(&arena,
1672 is_static,
1673 is_synchronized,
1674 is_critical_native,
1675 shorty,
1676 kThumb2));
Artem Serov12e097c2016-08-08 15:13:26 +01001677 std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
1678 ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
1679 const int frame_size(jni_conv->FrameSize());
1680 ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
1681
1682 const ManagedRegister method_register = ArmManagedRegister::FromCoreRegister(R0);
1683 const ManagedRegister scratch_register = ArmManagedRegister::FromCoreRegister(R12);
1684
1685 __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
1686 __ IncreaseFrameSize(32);
1687
1688 // Loads
1689 __ IncreaseFrameSize(4096);
1690 __ Load(method_register, FrameOffset(32), 4);
1691 __ Load(method_register, FrameOffset(124), 4);
1692 __ Load(method_register, FrameOffset(132), 4);
1693 __ Load(method_register, FrameOffset(1020), 4);
1694 __ Load(method_register, FrameOffset(1024), 4);
1695 __ Load(scratch_register, FrameOffset(4092), 4);
1696 __ Load(scratch_register, FrameOffset(4096), 4);
1697 __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
Vladimir Marko0e851e22016-08-25 18:17:56 +01001698 __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);
Artem Serov12e097c2016-08-08 15:13:26 +01001699
1700 // Stores
1701 __ Store(FrameOffset(32), method_register, 4);
1702 __ Store(FrameOffset(124), method_register, 4);
1703 __ Store(FrameOffset(132), method_register, 4);
1704 __ Store(FrameOffset(1020), method_register, 4);
1705 __ Store(FrameOffset(1024), method_register, 4);
1706 __ Store(FrameOffset(4092), scratch_register, 4);
1707 __ Store(FrameOffset(4096), scratch_register, 4);
1708 __ StoreImmediateToFrame(FrameOffset(48), 0xFF, scratch_register);
1709 __ StoreImmediateToFrame(FrameOffset(48), 0xFFFFFF, scratch_register);
1710 __ StoreRawPtr(FrameOffset(48), scratch_register);
1711 __ StoreRef(FrameOffset(48), scratch_register);
1712 __ StoreSpanning(FrameOffset(48), method_register, FrameOffset(48), scratch_register);
1713 __ StoreStackOffsetToThread(ThreadOffset32(512), FrameOffset(4096), scratch_register);
1714 __ StoreStackPointerToThread(ThreadOffset32(512));
1715
1716 // Other
1717 __ Call(method_register, FrameOffset(48), scratch_register);
1718 __ Copy(FrameOffset(48), FrameOffset(44), scratch_register, 4);
1719 __ CopyRawPtrFromThread(FrameOffset(44), ThreadOffset32(512), scratch_register);
1720 __ CopyRef(FrameOffset(48), FrameOffset(44), scratch_register);
1721 __ GetCurrentThread(method_register);
1722 __ GetCurrentThread(FrameOffset(48), scratch_register);
1723 __ Move(scratch_register, method_register, 4);
1724 __ VerifyObject(scratch_register, false);
1725
1726 __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, true);
1727 __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, false);
1728 __ CreateHandleScopeEntry(method_register, FrameOffset(48), scratch_register, true);
1729 __ CreateHandleScopeEntry(FrameOffset(48), FrameOffset(64), scratch_register, true);
1730 __ CreateHandleScopeEntry(method_register, FrameOffset(0), scratch_register, true);
1731 __ CreateHandleScopeEntry(method_register, FrameOffset(1025), scratch_register, true);
1732 __ CreateHandleScopeEntry(scratch_register, FrameOffset(1025), scratch_register, true);
1733
1734 __ ExceptionPoll(scratch_register, 0);
1735
1736 __ DecreaseFrameSize(4096);
1737 __ DecreaseFrameSize(32);
1738 __ RemoveFrame(frame_size, callee_save_regs);
1739
1740 EmitAndCheck(&assembler, "VixlJniHelpers");
1741}
1742
1743#ifdef ARM_VIXL
1744#define R0 vixl::aarch32::r0
1745#define R2 vixl::aarch32::r2
1746#define R4 vixl::aarch32::r4
1747#define R12 vixl::aarch32::r12
1748#undef __
1749#define __ assembler.asm_.
1750#endif
1751
Vladimir Marko0e851e22016-08-25 18:17:56 +01001752TEST_F(ArmVIXLAssemblerTest, VixlLoadFromOffset) {
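  // The offsets are chosen to straddle the immediate limits of the underlying encodings
  // (e.g. 0xfff is the largest 12-bit offset and 0x3fc the largest LDRD offset), so the
  // larger ones force the helper to materialize the address first; the (R4, R4) cases
  // additionally reuse the base register as the destination.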
Artem Serov12e097c2016-08-08 15:13:26 +01001753 __ LoadFromOffset(kLoadWord, R2, R4, 12);
1754 __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
1755 __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
1756 __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
1757 __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
1758 __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
1759 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
1760 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
1761 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
1762 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
1763 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
1764 __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
1765 __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
1766 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
1767 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
1768 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
1769 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
1770 __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
1771
1772 __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12.
1773 __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
1774
1775 __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
1776 __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
1777 __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
1778
1779 EmitAndCheck(&assembler, "VixlLoadFromOffset");
1780}
1781
Vladimir Marko0e851e22016-08-25 18:17:56 +01001782TEST_F(ArmVIXLAssemblerTest, VixlStoreToOffset) {
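  // Mirror of VixlLoadFromOffset for stores: the same offset boundaries apply, and the
  // (R4, R4) cases store the base register itself, so the helper presumably has to use a
  // separate scratch register whenever it needs to materialize the address.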
Artem Serov12e097c2016-08-08 15:13:26 +01001783 __ StoreToOffset(kStoreWord, R2, R4, 12);
1784 __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
1785 __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
1786 __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
1787 __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
1788 __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
1789 __ StoreToOffset(kStoreHalfword, R2, R4, 12);
1790 __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
1791 __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
1792 __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
1793 __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
1794 __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
1795 __ StoreToOffset(kStoreWordPair, R2, R4, 12);
1796 __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
1797 __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
1798 __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
1799 __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
1800 __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
1801
1802 __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12.
1803 __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
1804
1805 __ StoreToOffset(kStoreByte, R2, R4, 12);
1806
1807 EmitAndCheck(&assembler, "VixlStoreToOffset");
1808}
1809
1810#undef __
1811#endif // ENABLE_VIXL_TEST
Dave Allison65fcc2c2014-04-28 13:45:27 -07001812} // namespace arm
1813} // namespace art