// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace google {
namespace protobuf {

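// A minimal usage sketch of the machinery in this file, assuming the public
// Arena / ArenaOptions API declared in arena.h (MyMessage is a placeholder
// for any arena-enabled generated type):
//
//   ArenaOptions options;
//   options.start_block_size = 1024;   // First block NewBlock() will carve.
//   options.max_block_size = 65536;    // Cap for the doubling policy below.
//   Arena arena(options);              // The constructor calls Init().
//   MyMessage* m = Arena::CreateMessage<MyMessage>(&arena);
//   // ... use m; no delete needed ...
//   // ~Arena() runs CleanupList() and then FreeBlocks().
//
// Each arena takes a unique id from this global counter. ThreadCache entries
// record the id they were filled for, so a cached block belonging to a
// destroyed or Reset() arena can never be mistaken for a live one.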
google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;
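
// thread_cache() comes in three variants. Platforms without usable
// compiler-level TLS (GOOGLE_PROTOBUF_NO_THREADLOCAL) fall back to a
// heap-allocated ThreadLocalStorage wrapper; DLL builds hide the
// thread-local behind a function, presumably because a thread-local data
// member cannot be exported across the DLL boundary; everywhere else it is a
// plain GOOGLE_THREAD_LOCAL variable. { -1, NULL } means "no arena seen yet".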
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif

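// Called from every Arena constructor: stamps the arena with a fresh
// lifecycle id, threads any caller-supplied initial block into the block
// list, and fires the optional on_arena_init hook.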
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
        << ": Initial block size too small for header.";

    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook.
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

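// ~Arena() and Reset() share ResetInternal(); Reset() additionally takes a
// new lifecycle id so that stale ThreadCache entries can never match this
// arena again.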
Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook.
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook.
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }

  return space_allocated;
}

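// Allocates a fresh block with a doubling growth policy. For example, with
// start_block_size = 256 and max_block_size = 8192, successive blocks are
// 256, 512, 1024, ..., 8192, 8192, ...; a request larger than the current
// target (say n = 10000) instead gets a block of exactly kHeaderSize + n
// bytes.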
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // Do not attempt to reuse this block.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the
  // underlying malloc but it's not yet usable until we return it as part of
  // an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}

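// AddBlock() takes blocks_lock_; AddBlockInternal() is the unlocked body and
// is also called from Init() and FreeBlocks(), where no other thread can be
// touching the list. The Release_Store here pairs with the Acquire_Load in
// AllocateAligned() and FindBlock(), so a reader that sees the new pointer
// also sees a fully initialized block.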
void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  }
}

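// Registers a cleanup callback by pushing a Node onto a lock-free singly
// linked list. The nodes themselves are arena-allocated, so registration
// costs one atomic exchange and no separate bookkeeping allocation.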
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(
      google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
          reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
}

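// The hot allocation path: two lock-free fast paths (this thread's cached
// block for this arena, then the arena-wide hint_ block if this thread owns
// it) before falling back to SlowAlloc().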
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;
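  // (-8 is ~7 in two's complement, so e.g. n == 13 becomes (13 + 7) & ~7
  // == 16, while multiples of 8 pass through unchanged.)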

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from
  // the same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

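// Bump-pointer allocation within a block; callers have already verified that
// b->avail() >= n.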
void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}

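// Slow path: look for an existing block owned by this thread and, if none
// fits, allocate a fresh one (growing per NewBlock()'s doubling policy) and
// publish it under the lock.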
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
    SetThreadCacheBlock(b);
  }
  return reinterpret_cast<char*>(b) + kHeaderSize;
}

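// SpaceAllocated() counts bytes obtained from the underlying allocator;
// SpaceUsed() counts bytes actually handed out, excluding per-block header
// overhead. Both walk the list without locking, so allocations made
// concurrently with the call may not be reflected.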
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}

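// Returns every block to block_dealloc except a caller-provided initial
// block, which is rewound and re-added for reuse. Because AddBlockInternal()
// prepends, the initial block added in Init() is always the last node in the
// list.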
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, skip freeing the memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}

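// Runs the registered cleanup callbacks. AddListNode() prepends, so they
// execute in reverse registration order (newest first).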
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

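// Linear scan for the most recently added block owned by this thread;
// O(number of blocks), hence the TODO below.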
Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google