/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common_runtime_test.h"
#include "mirror/art_field-inl.h"
#include "mirror/string-inl.h"

#include <cstdio>

namespace art {

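// Tests that exercise the quick-code entrypoint stubs (memcpy, object locking, check-cast,
// checked array stores, allocation, String.compareTo, and field get/set) by calling them
// directly through small per-architecture assembly trampolines.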
class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() OVERRIDE {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods
      ScopedObjectAccess soa(Thread::Current());
      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(kRuntimeISA, type), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(Runtime::Options *options) OVERRIDE {
    // Use a smaller heap
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
  }

  // Helper function needed since TEST_F makes a new class.
  Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
    return &self->tlsPtr_;
  }

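  // Invoke a stub directly. The inline assembly below loads arg0-arg2 and the stub address
  // into the registers the target convention expects, terminates the quick stack with a
  // null frame, and calls the stub. For example, on x86-64 this becomes rdi = arg0,
  // rsi = arg1, rdx = arg2, rax = code, followed by "call *%rax"; the stub's result is
  // read back out of the return register.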
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
#if defined(__i386__)
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushl $0\n\t"               // Push nullptr to terminate quick stack
        "call *%%edi\n\t"            // Call the stub
        "addl $4, %%esp"             // Pop nullptr
        : "=a" (result)
          // Use the result from eax
        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : );  // clobber.
    // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
    //       but compilation fails when declaring that.
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"      // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, #0\n\t"
        "str r9, [sp, #-8]!\n\t"     // Push nullptr to terminate stack, +8B padding so 16B aligned
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #20\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "add sp, sp, #20\n\t"

        "blx r3\n\t"                 // Call the stub
        "add sp, sp, #12\n\t"        // Pop nullptr and padding
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"       // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"      // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
        : );  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        "sub sp, sp, #48\n\t"        // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 48\n\t"
        "stp xzr, x1, [sp]\n\t"      // nullptr (end of quick stack), x1
        "stp x2, x3, [sp, #16]\n\t"  // Save x2, x3
        "stp x18, x30, [sp, #32]\n\t"  // Save x18 (xSELF), xLR

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #8]\n\t"
        "str %[arg2], [sp, #16]\n\t"
        "str %[code], [sp, #24]\n\t"
        "str %[self], [sp, #32]\n\t"
        "ldr x0, [sp]\n\t"
        "ldr x1, [sp, #8]\n\t"
        "ldr x2, [sp, #16]\n\t"
        "ldr x3, [sp, #24]\n\t"
        "ldr x18, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"

        "blr x3\n\t"                 // Call the stub
        "ldp x1, x2, [sp, #8]\n\t"   // Restore x1, x2
        "ldp x3, x18, [sp, #24]\n\t"  // Restore x3, xSELF
        "ldr x30, [sp, #40]\n\t"     // Restore xLR
        "add sp, sp, #48\n\t"        // Free stack space
        ".cfi_adjust_cfa_offset -48\n\t"

        "mov %[result], x0\n\t"      // Save the result
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
        : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",
          "x17");  // clobber.
#elif defined(__x86_64__)
    // Note: Uses the native convention.
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushq $0\n\t"               // Push nullptr to terminate quick stack
        "pushq $0\n\t"               // 16B alignment padding
        ".cfi_adjust_cfa_offset 16\n\t"
        "call *%%rax\n\t"            // Call the stub
        "addq $16, %%rsp\n\t"        // Pop nullptr and padding
        ".cfi_adjust_cfa_offset -16\n\t"
        : "=a" (result)
          // Use the result from rax
        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code)
          // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14",
          "r15");  // clobber all
    // TODO: Should we clobber the other registers?
#else
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    return result;
  }

 public:
  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             mirror::ArtMethod* referrer) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
#if defined(__i386__)
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushl %[referrer]\n\t"      // Store referrer
        "call *%%edi\n\t"            // Call the stub
        "addl $4, %%esp"             // Pop referrer
        : "=a" (result)
          // Use the result from eax
        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : );  // clobber.
    // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
    //       but compilation fails when declaring that.
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"      // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"     // Push referrer, +8B padding so 16B aligned
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #20\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "add sp, sp, #20\n\t"

        "blx r3\n\t"                 // Call the stub
        "add sp, sp, #12\n\t"        // Pop referrer and padding
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"       // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"      // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer)
        : );  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        "sub sp, sp, #48\n\t"        // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 48\n\t"
        "stp %[referrer], x1, [sp]\n\t"  // referrer, x1
        "stp x2, x3, [sp, #16]\n\t"  // Save x2, x3
        "stp x18, x30, [sp, #32]\n\t"  // Save x18 (xSELF), xLR

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #8]\n\t"
        "str %[arg2], [sp, #16]\n\t"
        "str %[code], [sp, #24]\n\t"
        "str %[self], [sp, #32]\n\t"
        "ldr x0, [sp]\n\t"
        "ldr x1, [sp, #8]\n\t"
        "ldr x2, [sp, #16]\n\t"
        "ldr x3, [sp, #24]\n\t"
        "ldr x18, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"

        "blr x3\n\t"                 // Call the stub
        "ldp x1, x2, [sp, #8]\n\t"   // Restore x1, x2
        "ldp x3, x18, [sp, #24]\n\t"  // Restore x3, xSELF
        "ldr x30, [sp, #40]\n\t"     // Restore xLR
        "add sp, sp, #48\n\t"        // Free stack space
        ".cfi_adjust_cfa_offset -48\n\t"

        "mov %[result], x0\n\t"      // Save the result
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer)
        : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",
          "x17");  // clobber.
#elif defined(__x86_64__)
    // Note: Uses the native convention.
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushq %[referrer]\n\t"      // Push referrer
        "pushq (%%rsp)\n\t"          // & 16B alignment padding
        ".cfi_adjust_cfa_offset 16\n\t"
        "call *%%rax\n\t"            // Call the stub
        "addq $16, %%rsp\n\t"        // Pop referrer and padding
        ".cfi_adjust_cfa_offset -16\n\t"
        : "=a" (result)
          // Use the result from rax
        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "m"(referrer)
          // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14",
          "r15");  // clobber all
    // TODO: Should we clobber the other registers?
#else
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    return result;
  }

  // Method with 32b arg0, 64b arg1
  size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
                              mirror::ArtMethod* referrer) {
#if defined(__x86_64__) || defined(__aarch64__)
    // Just pass through.
    return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
#else
    // Need to split up arguments.
    uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
    uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);

    return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
#endif
  }

  // Method with 32b arg0, 32b arg1, 64b arg2
  size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
                               Thread* self, mirror::ArtMethod* referrer) {
#if defined(__x86_64__) || defined(__aarch64__)
    // Just pass through.
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
#else
    // TODO: Needs 4-param invoke.
    return 0;
#endif
  }
};


#if defined(__i386__) || defined(__x86_64__)
extern "C" void art_quick_memcpy(void);
#endif

TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || defined(__x86_64__)
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);

  EXPECT_EQ(orig[0], trg[0]);

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
extern "C" void art_quick_lock_object(void);
#endif

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::String> obj(soa.Self(),
                              mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
            reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

    // Check we're at lock count i

    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

  // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.

#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

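// Deterministic pseudo-random number generator for the stress tests below. A variant of a
// multiplicative linear congruential generator (multiplier 48271, modulus 2^31 - 1, plus a
// constant offset); statistical quality is irrelevant here, only reproducibility matters.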
class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};


#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
extern "C" void art_quick_lock_object(void);
extern "C" void art_quick_unlock_object(void);
#endif

TEST_F(StubTest, UnlockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::String> obj(soa.Self(),
                              mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kNumberOfLocks = 10;  // Number of objects and locks.
  constexpr size_t kIterations = 10000;  // Number of iterations.

  size_t counts[kNumberOfLocks];
  SirtRef<mirror::String>* objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    objects[i] = new SirtRef<mirror::String>(soa.Self(),
                                             mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    bool lock;  // Whether to lock or unlock in this step.
    if (counts[index] == 0) {
      lock = true;
    } else if (counts[index] == kThinLockLoops) {
      lock = false;
    } else {
      // Randomly.
      lock = r.next() % 2 == 0;
    }

    if (lock) {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
      counts[index]++;
    } else {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
      counts[index]--;
    }

    EXPECT_FALSE(self->IsExceptionPending());

    // Check the new state.
    LockWord lock_iter = objects[index]->get()->GetLockWord(false);
    LockWord::LockState iter_state = lock_iter.GetState();
    if (counts[index] > 0) {
      EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
      EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
    } else {
      EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go in reverse order to correctly handle SirtRefs.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
      count--;
    }

    LockWord lock_after4 = objects[index]->get()->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_EQ(LockWord::LockState::kUnlocked, new_state4);

    delete objects[index];
  }

  // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.

#else
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_check_cast(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "[Ljava/lang/Object;"));
  SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "[Ljava/lang/String;"));

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c2.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c2.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  // TODO: Make the following work. But that would require correct managed frames.

  Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
// Do not check non-checked ones, we'd need handlers and stuff...
#endif

TEST_F(StubTest, APutObj) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "Ljava/lang/Object;"));
  SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "Ljava/lang/String;"));
  SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "[Ljava/lang/String;"));

  // Build a string array of size 10
  SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
      mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), 10));

  // Build a string -> should be assignable
  SirtRef<mirror::Object> str_obj(soa.Self(),
                                  mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));

  // Build a generic object -> should fail assigning
  SirtRef<mirror::Object> obj_obj(soa.Self(), c->AllocObject(soa.Self()));

  // Play with it...

  // 1) Success cases
  // 1.1) Assign str_obj to array[0..3]

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(0));

  Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(1));

  Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(2));

  Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(3));

  // 1.2) Assign null to array[0..3]

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(0));

  Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(1));

  Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(2));

  Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(3));

  // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.

  // 2) Failure cases (str into str[])
  // 2.1) Array = null
  // TODO: Throwing NPE needs actual DEX code

//  Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.get()),
//          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
//
//  EXPECT_TRUE(self->IsExceptionPending());
//  self->ClearException();

  // 2.2) Index < 0

  Invoke3(reinterpret_cast<size_t>(array.get()), static_cast<size_t>(-1),
          reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // 2.3) Index >= length

  Invoke3(reinterpret_cast<size_t>(array.get()), 10U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // 3) Failure cases (obj into str[])

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(obj_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Tests done.
#else
  LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, AllocObject) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "Ljava/lang/Object;"));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());
  {
    // Use an arbitrary method from c to use as referrer.
    size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
                            reinterpret_cast<size_t>(c->GetVirtualMethod(0)),  // arbitrary
                            0U,
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObject),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                         "[Ljava/lang/Object;"));
    std::vector<SirtRef<mirror::Object>*> sirt_refs;
    // Start allocating with 128K.
    size_t length = 128 * KB / 4;
    while (length > 10) {
      SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(
          soa.Self(),
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), length / 4));
      if (self->IsExceptionPending() || ref->get() == nullptr) {
        self->ClearException();
        delete ref;

        // Try a smaller length.
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        sirt_refs.push_back(ref);
      }
    }
    LOG(INFO) << "Used " << sirt_refs.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
                                                                 c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && ref->get() != nullptr) {
        sirt_refs.push_back(ref);
      } else {
        delete ref;
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);

    // Release all the allocated objects.
    // Need to go backward to release SirtRefs in the right order.
    auto it = sirt_refs.rbegin();
    auto end = sirt_refs.rend();
    for (; it != end; ++it) {
      delete *it;
    }
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, AllocObjectArray) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "[Ljava/lang/Object;"));

  // Needed to have a linked method.
  SirtRef<mirror::Class> c_obj(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                          "Ljava/lang/Object;"));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  // For some reason this does not work, as the type_idx is artificial and outside what the
  // resolved types of c_obj allow...

  if (false) {
    // Use an arbitrary method from c to use as referrer.
    size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
                            reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)),  // arbitrary
                            10U,
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArray),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
    EXPECT_EQ(obj->GetLength(), 10);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 10U,
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
    mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            GB,  // that should fail...
                            reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_string_compareto(void);
#endif

TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  constexpr size_t base_string_count = 7;
  const char* c[base_string_count] = { "", "", "a", "aa", "ab", "aac", "aac" };

  constexpr size_t string_count = 2 * base_string_count;

  SirtRef<mirror::String>* s[string_count];

  for (size_t i = 0; i < base_string_count; ++i) {
    s[i] = new SirtRef<mirror::String>(soa.Self(),
                                       mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  RandGen r(0x1234);

  for (size_t i = base_string_count; i < string_count; ++i) {
    s[i] = new SirtRef<mirror::String>(
        soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i - base_string_count]));
    int32_t length = s[i]->get()->GetLength();
    if (length > 1) {
      // Set a random offset and length.
      int32_t new_offset = 1 + (r.next() % (length - 1));
      int32_t rest = length - new_offset - 1;
      int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);

      s[i]->get()->SetField32<false>(mirror::String::CountOffset(), new_length);
      s[i]->get()->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
    }
  }

  // TODO: wide characters.

  // Matrix of expectations. First component is the first parameter. Note we only check against
  // the sign, not the value. As we are testing random offsets, we need to compute this and need
  // to rely on String::CompareTo being correct.
  int32_t expected[string_count][string_count];
  for (size_t x = 0; x < string_count; ++x) {
    for (size_t y = 0; y < string_count; ++y) {
      expected[x][y] = s[x]->get()->CompareTo(s[y]->get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < string_count; ++x) {
    for (size_t y = 0; y < string_count; ++y) {
      // Test string_compareto x y
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x]->get()),
                              reinterpret_cast<size_t>(s[y]->get()), 0U,
                              reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer.
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x % base_string_count] << " y="
          << c[y % base_string_count] << " res=" << conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true) << "x=" << c[x % base_string_count] << " y="
          << c[y % base_string_count] << " res=" << conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true) << "x=" << c[x % base_string_count] << " y="
          << c[y % base_string_count] << " res=" << conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set32_static(void);
extern "C" void art_quick_get32_static(void);
#endif

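// Round-trip a set of 32-bit values through the static-field stubs: write each value via
// art_quick_set32_static, read it back via art_quick_get32_static, and expect to observe
// exactly what was written.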
static void GetSet32Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
                           mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  constexpr size_t num_values = 7;
  uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              reinterpret_cast<uintptr_t>(&art_quick_set32_static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                           0U, 0U,
                                           reinterpret_cast<uintptr_t>(&art_quick_get32_static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set32_instance(void);
extern "C" void art_quick_get32_instance(void);
#endif

static void GetSet32Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  constexpr size_t num_values = 7;
  uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->get()),
                              static_cast<size_t>(values[i]),
                              reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
                              self,
                              referrer);

    int32_t res = f->get()->GetInt(obj->get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->get()->SetInt<false>(obj->get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->get()),
                                            0U,
                                            reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set_obj_static(void);
extern "C" void art_quick_get_obj_static(void);

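// Helper: write val into the static object field through the set-obj stub, then read it
// back through the get-obj stub and expect the same reference.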
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
                                 mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(val),
                            0U,
                            reinterpret_cast<uintptr_t>(&art_quick_set_obj_static),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U, 0U,
                                         reinterpret_cast<uintptr_t>(&art_quick_get_obj_static),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
}
#endif

static void GetSetObjStatic(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static((*f)->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set_obj_instance(void);
extern "C" void art_quick_get_obj_instance(void);

static void set_and_check_instance(SirtRef<mirror::ArtField>* f, mirror::Object* trg,
                                   mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
                                   StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(trg),
                            reinterpret_cast<size_t>(val),
                            reinterpret_cast<uintptr_t>(&art_quick_set_obj_instance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(trg),
                                         0U,
                                         reinterpret_cast<uintptr_t>(&art_quick_get_obj_instance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;

  EXPECT_EQ(val, f->get()->GetObj(trg));
}
#endif

static void GetSetObjInstance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->get(), str, self, referrer, test);

  set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
#else
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


// TODO: Complete these tests for 32b architectures.

#if defined(__x86_64__) || defined(__aarch64__)
extern "C" void art_quick_set64_static(void);
extern "C" void art_quick_get64_static(void);
#endif

static void GetSet64Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
                           mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
  constexpr size_t num_values = 8;
  uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                               values[i],
                               reinterpret_cast<uintptr_t>(&art_quick_set64_static),
                               self,
                               referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                           0U, 0U,
                                           reinterpret_cast<uintptr_t>(&art_quick_get64_static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__x86_64__) || defined(__aarch64__)
extern "C" void art_quick_set64_instance(void);
extern "C" void art_quick_get64_instance(void);
#endif

static void GetSet64Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
  constexpr size_t num_values = 8;
  uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->get()),
                              static_cast<size_t>(values[i]),
                              reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
                              self,
                              referrer);

    int64_t res = f->get()->GetLong(obj->get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->get()->SetLong<false>(obj->get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->get()),
                                            0U,
                                            reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

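// Load the AllFields test class and run the matching get/set stub tests over each of its
// static and instance fields whose primitive type equals test_type.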
static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != NULL);
  jobject o = env->AllocObject(jc);
  CHECK(o != NULL);

  ScopedObjectAccess soa(self);
  SirtRef<mirror::Object> obj(self, soa.Decode<mirror::Object*>(o));

  SirtRef<mirror::Class> c(self, obj->GetClass());

  // Need a method as a referrer.
  SirtRef<mirror::ArtMethod> m(self, c->GetDirectMethod(0));

  // Play with it...

  // Static fields.
  {
    SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetSFields());
    int32_t num_fields = fields->GetLength();
    for (int32_t i = 0; i < num_fields; ++i) {
      SirtRef<mirror::ArtField> f(self, fields->Get(i));

      FieldHelper fh(f.get());
      Primitive::Type type = fh.GetTypeAsPrimitiveType();
      switch (type) {
        case Primitive::Type::kPrimInt:
          if (test_type == type) {
            GetSet32Static(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimLong:
          if (test_type == type) {
            GetSet64Static(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimNot:
          // Don't try array.
          if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
            GetSetObjStatic(&obj, &f, self, m.get(), test);
          }
          break;

        default:
          break;  // Skip.
      }
    }
  }

  // Instance fields.
  {
    SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetIFields());
    int32_t num_fields = fields->GetLength();
    for (int32_t i = 0; i < num_fields; ++i) {
      SirtRef<mirror::ArtField> f(self, fields->Get(i));

      FieldHelper fh(f.get());
      Primitive::Type type = fh.GetTypeAsPrimitiveType();
      switch (type) {
        case Primitive::Type::kPrimInt:
          if (test_type == type) {
            GetSet32Instance(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimLong:
          if (test_type == type) {
            GetSet64Instance(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimNot:
          // Don't try array.
          if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
            GetSetObjInstance(&obj, &f, self, m.get(), test);
          }
          break;

        default:
          break;  // Skip.
      }
    }
  }

  // TODO: Deallocate things.
}


TEST_F(StubTest, Fields32) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

}  // namespace art