/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common_runtime_test.h"
#include "mirror/art_field-inl.h"
#include "mirror/string-inl.h"

#include <cstdio>

namespace art {


class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() OVERRIDE {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods
      ScopedObjectAccess soa(Thread::Current());
      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(kRuntimeISA, type), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(Runtime::Options *options) OVERRIDE {
    // Use a smaller heap
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
  }

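  // Invoke a quick-code stub with up to three word-sized arguments. A minimal usage
  // sketch (taken from the Memcpy test below): pointer arguments are passed as size_t
  // and the stub entrypoint as uintptr_t, e.g.
  //
  //   Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
  //           10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);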
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
#if defined(__i386__)
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushl $0\n\t"                 // Push nullptr to terminate quick stack
        "call *%%edi\n\t"              // Call the stub
        "addl $4, %%esp"               // Pop nullptr
        : "=a" (result)
          // Use the result from eax
        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : );  // clobber.
    // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
    //       but compilation fails when declaring that.
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"        // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, #0\n\t"
        "str r9, [sp, #-8]!\n\t"       // Push nullptr to terminate quick stack; 8B reserved
                                       // (nullptr + 4B padding) so SP stays 16B aligned
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #20\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "add sp, sp, #20\n\t"

        "blx r3\n\t"                   // Call the stub
        "add sp, sp, #12\n\t"          // Pop nullptr and padding
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"         // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"        // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
        : );  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        "sub sp, sp, #48\n\t"          // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 48\n\t"
        "stp xzr, x1, [sp]\n\t"        // nullptr (end of quick stack), x1
        "stp x2, x3, [sp, #16]\n\t"    // Save x2, x3
        "stp x18, x30, [sp, #32]\n\t"  // Save x18(xSELF), xLR

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #8]\n\t"
        "str %[arg2], [sp, #16]\n\t"
        "str %[code], [sp, #24]\n\t"
        "str %[self], [sp, #32]\n\t"
        "ldr x0, [sp]\n\t"
        "ldr x1, [sp, #8]\n\t"
        "ldr x2, [sp, #16]\n\t"
        "ldr x3, [sp, #24]\n\t"
        "ldr x18, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"

        "blr x3\n\t"                   // Call the stub
        "ldp x1, x2, [sp, #8]\n\t"     // Restore x1, x2
        "ldp x3, x18, [sp, #24]\n\t"   // Restore x3, xSELF
        "ldr x30, [sp, #40]\n\t"       // Restore xLR
        "add sp, sp, #48\n\t"          // Free stack space
        ".cfi_adjust_cfa_offset -48\n\t"

        "mov %[result], x0\n\t"        // Save the result
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
        : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17");  // clobber.
#elif defined(__x86_64__)
    // Note: Uses the native convention
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushq $0\n\t"                 // Push nullptr to terminate quick stack
        "pushq $0\n\t"                 // 16B alignment padding
        ".cfi_adjust_cfa_offset 16\n\t"
        "call *%%rax\n\t"              // Call the stub
        "addq $16, %%rsp\n\t"          // Pop nullptr and padding
        ".cfi_adjust_cfa_offset -16\n\t"
        : "=a" (result)
          // Use the result from rax
        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code)
          // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15");  // clobber all
    // TODO: Should we clobber the other registers?
#else
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    return result;
  }

 public:
  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             mirror::ArtMethod* referrer) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
#if defined(__i386__)
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushl %[referrer]\n\t"        // Store referrer
        "call *%%edi\n\t"              // Call the stub
        "addl $4, %%esp"               // Pop referrer
        : "=a" (result)
          // Use the result from eax
        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : );  // clobber.
    // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
    //       but compilation fails when declaring that.
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"        // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"       // Push referrer; 8B reserved (referrer + 4B padding)
                                       // so SP stays 16B aligned
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #20\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "add sp, sp, #20\n\t"

        "blx r3\n\t"                   // Call the stub
        "add sp, sp, #12\n\t"          // Pop referrer and padding
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"         // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"        // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer)
        : );  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        "sub sp, sp, #48\n\t"          // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 48\n\t"
        "stp %[referrer], x1, [sp]\n\t"  // referrer, x1
        "stp x2, x3, [sp, #16]\n\t"    // Save x2, x3
        "stp x18, x30, [sp, #32]\n\t"  // Save x18(xSELF), xLR

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #8]\n\t"
        "str %[arg2], [sp, #16]\n\t"
        "str %[code], [sp, #24]\n\t"
        "str %[self], [sp, #32]\n\t"
        "ldr x0, [sp]\n\t"
        "ldr x1, [sp, #8]\n\t"
        "ldr x2, [sp, #16]\n\t"
        "ldr x3, [sp, #24]\n\t"
        "ldr x18, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"

        "blr x3\n\t"                   // Call the stub
        "ldp x1, x2, [sp, #8]\n\t"     // Restore x1, x2
        "ldp x3, x18, [sp, #24]\n\t"   // Restore x3, xSELF
        "ldr x30, [sp, #40]\n\t"       // Restore xLR
        "add sp, sp, #48\n\t"          // Free stack space
        ".cfi_adjust_cfa_offset -48\n\t"

        "mov %[result], x0\n\t"        // Save the result
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer)
        : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17");  // clobber.
#elif defined(__x86_64__)
    // Note: Uses the native convention
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushq %[referrer]\n\t"        // Push referrer
        "pushq (%%rsp)\n\t"            // & 16B alignment padding (duplicates the referrer)
        ".cfi_adjust_cfa_offset 16\n\t"
        "call *%%rax\n\t"              // Call the stub
        "addq $16, %%rsp\n\t"          // Pop referrer and padding
        ".cfi_adjust_cfa_offset -16\n\t"
        : "=a" (result)
          // Use the result from rax
        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "m"(referrer)
          // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15");  // clobber all
    // TODO: Should we clobber the other registers?
#else
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    return result;
  }

  // Method with 32b arg0, 64b arg1
  size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
                              mirror::ArtMethod* referrer) {
#if defined(__x86_64__) || defined(__aarch64__)
    // Just pass through.
    return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
#else
    // Need to split up arguments.
    uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
    uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);

    return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
#endif
  }
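  // Illustration of the split above (value chosen arbitrarily): for
  // arg1 == 0x1122334455667788, lower == 0x55667788 and upper == 0x11223344, i.e. the
  // stub sees the low word in its second argument and the high word in its third.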

  // Method with 32b arg0, 32b arg1, 64b arg2
  size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
                               Thread* self, mirror::ArtMethod* referrer) {
#if defined(__x86_64__) || defined(__aarch64__)
    // Just pass through.
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
#else
    // TODO: Needs 4-param invoke.
    return 0;
#endif
  }
};


#if defined(__i386__) || defined(__x86_64__)
extern "C" void art_quick_memcpy(void);
#endif

TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || defined(__x86_64__)
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);

  EXPECT_EQ(orig[0], trg[0]);

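  // Note: orig[0] and trg[0] are both zero here, so the equality above holds without any
  // copying; the untouched head and tail are really covered by the EXPECT_NE ranges below.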
  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
extern "C" void art_quick_lock_object(void);
#endif

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::String> obj(soa.Self(),
                              mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
            reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

    // Check we're at lock count i

    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

  // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.

#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};
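
// A note on RandGen (not part of the original test logic): this is a Lehmer/"minstd"-style
// multiplicative congruential generator (multiplier 48271, modulus 2^31 - 1) with an extra
// "+ 13". The uint32_t multiply can wrap before the modulo, so it is not an exact Lehmer
// sequence; the tests only need it to be deterministic. Worked example with the seed 0x1234
// used below: 0x1234 * 48271 == 224942860, which is below 2^31 - 1, so the first next()
// returns 224942860 + 13 == 224942873.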


#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
extern "C" void art_quick_lock_object(void);
extern "C" void art_quick_unlock_object(void);
#endif

TEST_F(StubTest, UnlockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::String> obj(soa.Self(),
                              mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kNumberOfLocks = 10;  // Number of objects = number of locks
  constexpr size_t kIterations = 10000;  // Number of iterations

  size_t counts[kNumberOfLocks];
  SirtRef<mirror::String>* objects[kNumberOfLocks];

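  // counts[i] tracks the expected thin-lock recursion depth of objects[i]; the loop below
  // never locks beyond kThinLockLoops, so each lock should stay thin and ThinLockCount()
  // (which is zero-based) stays one below counts[i].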
  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    objects[i] = new SirtRef<mirror::String>(soa.Self(),
                                             mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    bool lock;  // Whether to lock or unlock in this step.
    if (counts[index] == 0) {
      lock = true;
    } else if (counts[index] == kThinLockLoops) {
      lock = false;
    } else {
      // Randomly.
      lock = r.next() % 2 == 0;
    }

    if (lock) {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
      counts[index]++;
    } else {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
      counts[index]--;
    }

    EXPECT_FALSE(self->IsExceptionPending());

    // Check the new state.
    LockWord lock_iter = objects[index]->get()->GetLockWord(false);
    LockWord::LockState iter_state = lock_iter.GetState();
    if (counts[index] > 0) {
      EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
      EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
    } else {
      EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go reverse order to correctly handle SirtRefs.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

      count--;
    }

    LockWord lock_after4 = objects[index]->get()->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_EQ(LockWord::LockState::kUnlocked, new_state4);

    delete objects[index];
  }

  // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.

#else
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_check_cast(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "[Ljava/lang/Object;"));
  SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "[Ljava/lang/String;"));

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c2.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c2.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

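  // Note: the (Object[], String[]) combination above succeeds because String[] is
  // assignable to Object[]; the reverse combination below is expected to throw.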
  // TODO: Make the following work. But that would require correct managed frames.

  Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
// Do not check non-checked ones, we'd need handlers and stuff...
#endif

TEST_F(StubTest, APutObj) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "Ljava/lang/Object;"));
  SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "Ljava/lang/String;"));
  SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "[Ljava/lang/String;"));

  // Build a string array of size 10
  SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
      mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), 10));

  // Build a string -> should be assignable
  SirtRef<mirror::Object> str_obj(soa.Self(),
                                  mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));

  // Build a generic object -> should fail assigning
  SirtRef<mirror::Object> obj_obj(soa.Self(), c->AllocObject(soa.Self()));

  // Play with it...

  // 1) Success cases
  // 1.1) Assign str_obj to array[0..3]

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(0));

  Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(1));

  Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(2));

  Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(3));

  // 1.2) Assign null to array[0..3]

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(0));

  Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(1));

  Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(2));

  Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(3));

  // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.

  // 2) Failure cases (str into str[])
  // 2.1) Array = null
  // TODO: Throwing NPE needs actual DEX code

//  Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.get()),
//          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
//
//  EXPECT_TRUE(self->IsExceptionPending());
//  self->ClearException();

  // 2.2) Index < 0

  Invoke3(reinterpret_cast<size_t>(array.get()), static_cast<size_t>(-1),
          reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // 2.3) Index >= array length

  Invoke3(reinterpret_cast<size_t>(array.get()), 10U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // 3) Failure cases (obj into str[])

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(obj_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Tests done.
#else
  LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_alloc_object_rosalloc(void);
extern "C" void art_quick_alloc_object_resolved_rosalloc(void);
extern "C" void art_quick_alloc_object_initialized_rosalloc(void);
#endif

TEST_F(StubTest, AllocObject) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "Ljava/lang/Object;"));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // Use an arbitrary method from c to use as referrer
    size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
                            reinterpret_cast<size_t>(c->GetVirtualMethod(0)),  // arbitrary
                            0U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_object_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases)
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_object_resolved_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases)
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_object_initialized_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

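  // The three stubs exercised above differ (roughly) in how much resolution work they do:
  // the plain _rosalloc variant resolves a type index through the referrer method, while
  // the _resolved and _initialized variants receive an already-resolved (and, for the
  // latter, already-initialized) Class pointer directly.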
  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                         "[Ljava/lang/Object;"));
    std::vector<SirtRef<mirror::Object>*> sirt_refs;
    // Start allocating with 128K
    size_t length = 128 * KB / 4;
    while (length > 10) {
      SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), length / 4));
      if (self->IsExceptionPending() || ref->get() == nullptr) {
        self->ClearException();
        delete ref;

        // Try a smaller length
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        sirt_refs.push_back(ref);
      }
    }
    LOG(DEBUG) << "Used " << sirt_refs.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
                                                                 c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && ref->get() != nullptr) {
        sirt_refs.push_back(ref);
      } else {
        delete ref;
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_object_initialized_rosalloc),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);

    // Release all the allocated objects.
    // Need to go backward to release SirtRef in the right order.
    auto it = sirt_refs.rbegin();
    auto end = sirt_refs.rend();
    for (; it != end; ++it) {
      delete *it;
    }
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_alloc_array_rosalloc(void);
extern "C" void art_quick_alloc_array_resolved_rosalloc(void);
#endif

TEST_F(StubTest, AllocObjectArray) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "[Ljava/lang/Object;"));

  // Needed to have a linked method.
  SirtRef<mirror::Class> c_obj(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                          "Ljava/lang/Object;"));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  // For some reason this does not work, as the type_idx is artificial and outside what the
  // resolved types of c_obj allow...

  if (false) {
    // Use an arbitrary method from c to use as referrer
    size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
                            reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)),  // arbitrary
                            10U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_array_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
    EXPECT_EQ(obj->GetLength(), 10);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases)
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 10U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_array_resolved_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
    mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            GB,  // that should fail...
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_array_resolved_rosalloc),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_string_compareto(void);
#endif

TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings
  // Use array so we can index into it and use a matrix for expected results
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  constexpr size_t base_string_count = 7;
  const char* c[base_string_count] = { "", "", "a", "aa", "ab", "aac", "aac" };

  constexpr size_t string_count = 2 * base_string_count;

  SirtRef<mirror::String>* s[string_count];

  for (size_t i = 0; i < base_string_count; ++i) {
    s[i] = new SirtRef<mirror::String>(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                         c[i]));
  }

  RandGen r(0x1234);

  for (size_t i = base_string_count; i < string_count; ++i) {
    s[i] = new SirtRef<mirror::String>(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                         c[i - base_string_count]));
    int32_t length = s[i]->get()->GetLength();
    if (length > 1) {
      // Set a random offset and length.
      int32_t new_offset = 1 + (r.next() % (length - 1));
      int32_t rest = length - new_offset - 1;
      int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);

      s[i]->get()->SetField32<false>(mirror::String::CountOffset(), new_length);
      s[i]->get()->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
    }
  }

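  // Worked example of the truncation above (exact values depend on the RNG): for "aac"
  // (length 3), new_offset is 1 or 2, rest is then 1 or 0, and new_length is always 1, so
  // the second-half entry becomes a one-character substring that shares the original
  // backing array.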
  // TODO: wide characters

  // Matrix of expectations. First component is first parameter. Note we only check against the
  // sign, not the value. As we are testing random offsets, we need to compute this and need to
  // rely on String::CompareTo being correct.
  int32_t expected[string_count][string_count];
  for (size_t x = 0; x < string_count; ++x) {
    for (size_t y = 0; y < string_count; ++y) {
      expected[x][y] = s[x]->get()->CompareTo(s[y]->get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < string_count; ++x) {
    for (size_t y = 0; y < string_count; ++y) {
      // Test string_compareto x y
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x]->get()),
                              reinterpret_cast<size_t>(s[y]->get()), 0U,
                              reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer
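      // (The stub returns it in a 32-bit register, so on 64-bit targets the upper half of
      // the returned word is not meaningful; the union below reinterprets the low bits --
      // a little-endian assumption that holds for all ISAs tested here.)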
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set32_static(void);
extern "C" void art_quick_get32_static(void);
#endif

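// The quick field stubs are keyed by the field's dex index plus a referrer method, which
// the slow path uses to resolve the field; that is why the helpers below go through
// Invoke3WithReferrer rather than plain Invoke3.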
static void GetSet32Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
                           mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  constexpr size_t num_values = 7;
  uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              reinterpret_cast<uintptr_t>(&art_quick_set32_static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                           0U, 0U,
                                           reinterpret_cast<uintptr_t>(&art_quick_get32_static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set32_instance(void);
extern "C" void art_quick_get32_instance(void);
#endif

static void GetSet32Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  constexpr size_t num_values = 7;
  uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->get()),
                              static_cast<size_t>(values[i]),
                              reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
                              self,
                              referrer);

    int32_t res = f->get()->GetInt(obj->get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->get()->SetInt<false>(obj->get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->get()),
                                            0U,
                                            reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set_obj_static(void);
extern "C" void art_quick_get_obj_static(void);

static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
                                 mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(val),
                            0U,
                            reinterpret_cast<uintptr_t>(&art_quick_set_obj_static),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U, 0U,
                                         reinterpret_cast<uintptr_t>(&art_quick_get_obj_static),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
}
#endif

static void GetSetObjStatic(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static((*f)->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set_obj_instance(void);
extern "C" void art_quick_get_obj_instance(void);

static void set_and_check_instance(SirtRef<mirror::ArtField>* f, mirror::Object* trg,
                                   mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
                                   StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(trg),
                            reinterpret_cast<size_t>(val),
                            reinterpret_cast<uintptr_t>(&art_quick_set_obj_instance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(trg),
                                         0U,
                                         reinterpret_cast<uintptr_t>(&art_quick_get_obj_instance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;

  EXPECT_EQ(val, f->get()->GetObj(trg));
}
#endif

static void GetSetObjInstance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->get(), str, self, referrer, test);

  set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
#else
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


// TODO: Complete these tests for 32b architectures.

#if defined(__x86_64__) || defined(__aarch64__)
extern "C" void art_quick_set64_static(void);
extern "C" void art_quick_get64_static(void);
#endif

static void GetSet64Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
                           mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
  constexpr size_t num_values = 8;
  uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                               values[i],
                               reinterpret_cast<uintptr_t>(&art_quick_set64_static),
                               self,
                               referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                           0U, 0U,
                                           reinterpret_cast<uintptr_t>(&art_quick_get64_static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__x86_64__) || defined(__aarch64__)
extern "C" void art_quick_set64_instance(void);
extern "C" void art_quick_get64_instance(void);
#endif

static void GetSet64Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
  constexpr size_t num_values = 8;
  uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->get()),
                              static_cast<size_t>(values[i]),
                              reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
                              self,
                              referrer);

    int64_t res = f->get()->GetLong(obj->get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->get()->SetLong<false>(obj->get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->get()),
                                            0U,
                                            reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != NULL);
  jobject o = env->AllocObject(jc);
  CHECK(o != NULL);

  ScopedObjectAccess soa(self);
  SirtRef<mirror::Object> obj(self, soa.Decode<mirror::Object*>(o));

  SirtRef<mirror::Class> c(self, obj->GetClass());

  // Need a method as a referrer
  SirtRef<mirror::ArtMethod> m(self, c->GetDirectMethod(0));

  // Play with it...

  // Static fields.
  {
    SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetSFields());
    int32_t num_fields = fields->GetLength();
    for (int32_t i = 0; i < num_fields; ++i) {
      SirtRef<mirror::ArtField> f(self, fields->Get(i));

      FieldHelper fh(f.get());
      Primitive::Type type = fh.GetTypeAsPrimitiveType();
      switch (type) {
        case Primitive::Type::kPrimInt:
          if (test_type == type) {
            GetSet32Static(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimLong:
          if (test_type == type) {
            GetSet64Static(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimNot:
          // Don't try array.
          if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
            GetSetObjStatic(&obj, &f, self, m.get(), test);
          }
          break;

        default:
          break;  // Skip.
      }
    }
  }

  // Instance fields.
  {
    SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetIFields());
    int32_t num_fields = fields->GetLength();
    for (int32_t i = 0; i < num_fields; ++i) {
      SirtRef<mirror::ArtField> f(self, fields->Get(i));

      FieldHelper fh(f.get());
      Primitive::Type type = fh.GetTypeAsPrimitiveType();
      switch (type) {
        case Primitive::Type::kPrimInt:
          if (test_type == type) {
            GetSet32Instance(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimLong:
          if (test_type == type) {
            GetSet64Instance(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimNot:
          // Don't try array.
          if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
            GetSetObjInstance(&obj, &f, self, m.get(), test);
          }
          break;

        default:
          break;  // Skip.
      }
    }
  }

  // TODO: Deallocate things.
}


TEST_F(StubTest, Fields32) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

}  // namespace art