// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace google {
namespace protobuf {

google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;
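// thread_cache() returns the calling thread's ThreadCache. Three variants
// follow: an emulated TLS implementation for platforms without thread-local
// support, a function-local GOOGLE_THREAD_LOCAL for DLL builds (keeping the
// thread-local object defined inside the protobuf DLL), and a plain
// thread-local variable otherwise.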
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif

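// Init() resets the arena's bookkeeping state and, when ArenaOptions supplies
// an initial block, threads that block into the block list so early
// allocations need no heap call.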
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
        << ": Initial block size too small for header.";

    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }

  return space_allocated;
}

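// NewBlock() allocates a fresh block, doubling the caller's previous block
// size up to max_block_size (or using start_block_size when there is no
// previous block), and growing further only if the requested n plus the
// header would not otherwise fit.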
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // Do not attempt to reuse this block.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
  // malloc but it's not yet usable until we return it as part of an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}

void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  }
}

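// AddListNode() records a cleanup callback for `elem`. The node is allocated
// from the arena itself, and the atomic exchange pushes it onto the head of
// cleanup_list_ without taking a lock.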
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(
      google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
          reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
}

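// AllocateAligned() rounds n up to a multiple of 8, reports the allocation to
// the on_arena_allocation hook if one is installed, and then tries two
// lock-free fast paths (this thread's cached block, then the arena's hint
// block) before falling back to SlowAlloc().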
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

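// AllocFromBlock() is a plain bump-pointer allocation; callers must already
// have verified that block b has at least n bytes available.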
void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}

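// SlowAlloc() handles the miss path: reuse an existing block owned by this
// thread if one has room, otherwise allocate a new block and link it into the
// block list under blocks_lock_.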
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
    SetThreadCacheBlock(b);
  }
  return reinterpret_cast<char*>(b) + kHeaderSize;
}

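// SpaceAllocated() returns the total bytes reserved by all blocks, including
// block headers and unused tail space.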
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

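// SpaceUsed() returns only the bytes actually handed out to callers, excluding
// block headers and unallocated space.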
uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}

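// FreeBlocks() releases every block the arena owns and returns the total
// bytes that had been allocated; a user-supplied initial block is not freed
// but is reset and re-added so it can be reused.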
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, so skip freeing its memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}

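// CleanupList() invokes the registered cleanup callbacks (typically object
// destructors) in reverse registration order, since AddListNode() pushes onto
// the list head.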
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

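// FindBlock() walks the block list and returns the most recently added block
// owned by `me`, or NULL if this thread owns none.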
Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google