blob: 7d8b584fc90cd1a2746e7d8ac4787a414563962d [file] [log] [blame]
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
18#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
Mathieu Chartierd8195f12012-10-05 12:21:28 -070019
Brian Carlstroma2806552014-02-27 12:29:32 -080020#include <algorithm>
Mathieu Chartierd8195f12012-10-05 12:21:28 -070021#include <string>
22
Ian Rogersef7d42f2014-01-06 12:55:46 -080023#include "atomic.h"
Elliott Hughes07ed66b2012-12-12 18:34:25 -080024#include "base/logging.h"
Elliott Hughes76160052012-12-12 16:31:20 -080025#include "base/macros.h"
Ian Rogers507dfdd2014-05-15 16:42:40 -070026#include "UniquePtrCompat.h"
Mathieu Chartierd8195f12012-10-05 12:21:28 -070027#include "mem_map.h"
28#include "utils.h"
29
30namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070031namespace gc {
32namespace accounting {
Mathieu Chartierd8195f12012-10-05 12:21:28 -070033
// A fixed-capacity stack of T backed by an anonymous memory mapping.
// Elements can be pushed concurrently with the CAS-based AtomicPushBack /
// AtomicBumpBack entry points; the plain PushBack / PopBack / PopFront
// variants are non-atomic and require external synchronization.  Used by the
// GC as a mark stack (see the "mark stack" naming in Create/Init).
template <typename T>
class AtomicStack {
 public:
  // Capacity is how many elements we can store in the stack.
  // Allocates the backing mapping via Init(); ownership of the returned
  // stack passes to the caller.
  static AtomicStack* Create(const std::string& name, size_t capacity) {
    UniquePtr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }

  ~AtomicStack() {}

  // Empties the stack and tells the kernel it may reclaim the backing pages
  // (madvise MADV_DONTNEED).  The mapping itself stays valid; a failed
  // madvise is only a warning since it is purely an optimization.
  void Reset() {
    DCHECK(mem_map_.get() != NULL);
    DCHECK(begin_ != NULL);
    front_index_ = 0;
    back_index_ = 0;
    debug_is_sorted_ = true;
    int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }

  // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.

  // Thread-safe push: reserves a slot with a CAS loop on back_index_, then
  // writes the value into the reserved slot.
  // Returns false if we overflowed the stack.
  bool AtomicPushBack(const T& value) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_;
      if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareAndSwap(index, index + 1));
    // Slot at 'index' is now exclusively ours; publish the value.
    begin_[index] = value;
    return true;
  }

  // Atomically bump the back index by the given number of
  // slots. Returns false if we overflowed the stack.
  // On success, [*start_address, *end_address) is the caller-owned range of
  // num_slots reserved (still zero-filled) slots.
  // NOTE(review): the check below rejects new_index == capacity_, i.e. a bump
  // that would exactly fill the stack, whereas AtomicPushBack allows filling
  // to capacity.  Possibly intentional slack — confirm before relying on it.
  bool AtomicBumpBack(size_t num_slots, T** start_address, T** end_address) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_;
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= capacity_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareAndSwap(index, new_index));
    *start_address = &begin_[index];
    *end_address = &begin_[new_index];
    if (kIsDebugBuild) {
      // Sanity check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i], static_cast<T>(0))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }

  // Debug-only: verifies every slot of the backing store (all capacity_
  // entries, live or not) is zero.  No-op in release builds.
  void AssertAllZero() {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i], static_cast<T>(0)) << "i=" << i;
      }
    }
  }

  // Non-atomic push; caller must guarantee exclusive access.  DCHECKs (debug
  // builds only) rather than returning false on overflow.
  void PushBack(const T& value) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index = back_index_;
    DCHECK_LT(static_cast<size_t>(index), capacity_);
    back_index_ = index + 1;
    begin_[index] = value;
  }

  // Non-atomic pop of the most recently pushed element; caller must
  // guarantee exclusive access and a non-empty stack.
  T PopBack() {
    DCHECK_GT(back_index_, front_index_);
    // Decrement the back index non atomically.
    back_index_ = back_index_ - 1;
    return begin_[back_index_];
  }

  // Take an item from the front of the stack.
  // Non-atomic; advances front_index_, making the stack a deque-like FIFO
  // when used this way.
  T PopFront() {
    int32_t index = front_index_;
    DCHECK_LT(index, back_index_.Load());
    front_index_ = front_index_ + 1;
    return begin_[index];
  }

  // Pop a number of elements.
  // Atomically subtracts n from back_index_; the popped slots are not
  // cleared or returned.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.FetchAndSub(n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  // Number of live elements, i.e. back_index_ - front_index_.
  size_t Size() const {
    DCHECK_LE(front_index_, back_index_);
    return back_index_ - front_index_;
  }

  // Iterators over the live range [front_index_, back_index_).  const_cast
  // because callers mutate elements through a const stack (e.g. Sort()).
  T* Begin() const {
    return const_cast<T*>(begin_ + front_index_);
  }

  T* End() const {
    return const_cast<T*>(begin_ + back_index_);
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Will clear the stack.
  // Replaces the backing mapping with a new one of new_capacity elements;
  // all existing contents are discarded.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    Init();
  }

  // Sorts the live elements in place.  The CHECKs verify no concurrent
  // pushes/pops happened while sorting — callers must have quiesced writers.
  void Sort() {
    int32_t start_back_index = back_index_.Load();
    int32_t start_front_index = front_index_.Load();
    std::sort(Begin(), End());
    CHECK_EQ(start_back_index, back_index_.Load());
    CHECK_EQ(start_front_index, front_index_.Load());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  // O(log n) membership test; only valid after Sort() (DCHECKed via the
  // debug-only sorted flag).
  bool ContainsSorted(const T& value) const {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value);
  }

  // O(n) membership test over the live range; no sortedness required.
  bool Contains(const T& value) const {
    return std::find(Begin(), End(), value) != End();
  }

 private:
  // Private: instances must be made via Create() so Init() always runs.
  AtomicStack(const std::string& name, const size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(NULL),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Size in number of elements.
  // Maps capacity_ * sizeof(T) bytes of anonymous zeroed memory (CHECK-fails
  // on allocation failure, since the GC cannot proceed without a mark stack)
  // and resets the indices.
  void Init() {
    std::string error_msg;
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
                                        PROT_READ | PROT_WRITE, false, &error_msg));
    CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
    byte* addr = mem_map_->Begin();
    CHECK(addr != NULL);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<T*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;

  // Memory mapping of the atomic stack.
  UniquePtr<MemMap> mem_map_;

  // Back index (index after the last element pushed).
  AtomicInteger back_index_;

  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;

  // Base of the atomic stack.
  T* begin_;

  // Maximum number of elements.
  size_t capacity_;

  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};
237
// Convenience alias: a stack of heap object pointers (e.g. the GC mark stack).
typedef AtomicStack<mirror::Object*> ObjectStack;
239
Ian Rogers1d54e732013-05-02 21:10:01 -0700240} // namespace accounting
241} // namespace gc
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700242} // namespace art
243
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700244#endif // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_