/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {
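
// Barrier() is the read barrier for the reference field at 'offset' inside
// 'obj', whose raw address is 'ref_addr'. Depending on the build-time flags
// below, it applies a Baker-style barrier, a Brooks-style barrier
// (unimplemented), a table-lookup barrier, or no barrier at all, and returns
// a reference that satisfies the to-space invariant. A minimal usage sketch
// (hypothetical caller, not part of this file) from a reference field getter:
//
//   mirror::HeapReference<MirrorType>* ref_addr = ...;  // raw field address
//   MirrorType* ref = ReadBarrier::Barrier<MirrorType, kWithReadBarrier, false>(
//       obj, offset, ref_addr);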
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // The high bits of the read barrier pointer, rb_ptr_high_bits (which
    // must be zero), are used to create an artificial data dependency from
    // the is_gray load to the ref field (ptr) load, to avoid needing a
    // load-load barrier between the two.
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
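    // Because rb_ptr_high_bits must be zero (checked below), the OR above
    // leaves the address value unchanged (0 | addr == addr), but the address
    // computation now depends on the rb_ptr load, so the CPU cannot reorder
    // the ref field load ahead of the is_gray load.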
    MirrorType* ref = ref_addr->AsMirrorPtr();
    if (is_gray) {
      // Slow-path.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if the mutator updates it
      // before us, but that's OK.
      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
          offset, old_ref, ref);
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}
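
// BarrierForRoot() is the read barrier for a GC root slot 'root', which has
// no holding object or field offset. A minimal usage sketch (hypothetical
// caller, not part of this file) from a root visitor:
//
//   mirror::Object** root = ...;  // address of a root slot
//   mirror::Object* ref =
//       ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier, true>(root);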
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root atomically. This may fail if the mutator updates it
      // before us, but that's OK.
      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}
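
// Returns true if the barrier code above must not consult the collector:
// during startup the heap or the concurrent copying (CC) collector can still
// be null, and CC may not be the current collector type at all.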
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}
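
// Checks that 'ref' satisfies the to-space invariant, i.e. that it does not
// point into from-space while CC is running. 'obj' and 'offset' identify the
// holding field (presumably for diagnostics); they are null/zero for roots.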
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}
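
// Delegates to the CC collector, which is expected to return the to-space
// version of 'obj', marking (and copying) it first if necessary.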
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}
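
// Decodes obj's read barrier pointer: the low bits (under rb_ptr_mask_)
// encode the object's color and the high bits are handed back to the caller,
// who checks that they are zero. Illustrative example, assuming (an
// assumption here; the real constants live in read_barrier.h) white_ptr_ ==
// 0x0, gray_ptr_ == 0x1, black_ptr_ == 0x2, and rb_ptr_mask_ == 0x3: a read
// barrier pointer of 0x1 decodes as
//   rb_ptr_low_bits       == 0x1 & 0x3  == gray_ptr_  -> returns true
//   *out_rb_ptr_high_bits == 0x1 & ~0x3 == 0x0
// so Barrier() takes the slow path and the dependency OR stays a no-op.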
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_