// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

31#include <google/protobuf/arena.h>
32
Feng Xiao99aa0f92014-11-20 16:18:53 -080033#ifdef ADDRESS_SANITIZER
34#include <sanitizer/asan_interface.h>
35#endif
36
Feng Xiao6ef984a2014-11-10 17:34:54 -080037namespace google {
38namespace protobuf {
39
40google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;
Feng Xiao8d5d7cc2014-12-09 17:05:10 -080041#ifdef PROTOBUF_USE_DLLS
42Arena::ThreadCache& Arena::thread_cache() {
43 static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
44 return thread_cache_;
45}
46#else
Jisi Liu885b6122015-02-28 14:51:22 -080047__thread Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
Feng Xiao8d5d7cc2014-12-09 17:05:10 -080048#endif
Feng Xiao6ef984a2014-11-10 17:34:54 -080049
Jisi Liu885b6122015-02-28 14:51:22 -080050void Arena::Init() {
Feng Xiao6ef984a2014-11-10 17:34:54 -080051 lifecycle_id_ = lifecycle_id_generator_.GetNext();
Feng Xiao9173ba22014-12-02 15:28:11 -080052 blocks_ = 0;
53 hint_ = 0;
Feng Xiao6ef984a2014-11-10 17:34:54 -080054 owns_first_block_ = true;
Feng Xiao9173ba22014-12-02 15:28:11 -080055 cleanup_list_ = 0;
Feng Xiao6ef984a2014-11-10 17:34:54 -080056
Jisi Liu885b6122015-02-28 14:51:22 -080057 if (options_.initial_block != NULL && options_.initial_block_size > 0) {
Feng Xiao6ef984a2014-11-10 17:34:54 -080058 // Add first unowned block to list.
Jisi Liu885b6122015-02-28 14:51:22 -080059 Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
60 first_block->size = options_.initial_block_size;
Feng Xiao6ef984a2014-11-10 17:34:54 -080061 first_block->pos = kHeaderSize;
62 first_block->next = NULL;
63 first_block->owner = &first_block->owner;
64 AddBlock(first_block);
65 owns_first_block_ = false;
66 }
Jisi Liu885b6122015-02-28 14:51:22 -080067
68 // Call the initialization hook
69 if (options_.on_arena_init != NULL) {
70 hooks_cookie_ = options_.on_arena_init(this);
71 } else {
72 hooks_cookie_ = NULL;
73 }
74}
75
76Arena::~Arena() {
77 uint64 space_allocated = Reset();
78
79 // Call the destruction hook
80 if (options_.on_arena_destruction != NULL) {
81 options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
82 }
Feng Xiao6ef984a2014-11-10 17:34:54 -080083}
84
85uint64 Arena::Reset() {
86 CleanupList();
Jisi Liu885b6122015-02-28 14:51:22 -080087 uint64 space_allocated = FreeBlocks();
Feng Xiao6ef984a2014-11-10 17:34:54 -080088 // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
89 lifecycle_id_ = lifecycle_id_generator_.GetNext();
Jisi Liu885b6122015-02-28 14:51:22 -080090
91 // Call the reset hook
92 if (options_.on_arena_reset != NULL) {
93 options_.on_arena_reset(this, hooks_cookie_, space_allocated);
94 }
95
96 return space_allocated;
Feng Xiao6ef984a2014-11-10 17:34:54 -080097}
98
99Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
100 size_t start_block_size, size_t max_block_size) {
101 size_t size;
102 if (my_last_block != NULL) {
103 // Double the current block size, up to a limit.
104 size = 2 * (my_last_block->size);
105 if (size > max_block_size) size = max_block_size;
106 } else {
107 size = start_block_size;
108 }
109 if (n > size - kHeaderSize) {
110 // TODO(sanjay): Check if n + kHeaderSize would overflow
111 size = kHeaderSize + n;
112 }
113
Jisi Liu885b6122015-02-28 14:51:22 -0800114 Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
Feng Xiao6ef984a2014-11-10 17:34:54 -0800115 b->pos = kHeaderSize + n;
116 b->size = size;
117 if (b->avail() == 0) {
118 // Do not attempt to reuse this block.
119 b->owner = NULL;
120 } else {
121 b->owner = me;
122 }
Feng Xiao99aa0f92014-11-20 16:18:53 -0800123#ifdef ADDRESS_SANITIZER
124 // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
125 // malloc but it's not yet usable until we return it as part of an allocation.
126 ASAN_POISON_MEMORY_REGION(
127 reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
128#endif
Feng Xiao6ef984a2014-11-10 17:34:54 -0800129 return b;
130}
131
132void Arena::AddBlock(Block* b) {
133 MutexLock l(&blocks_lock_);
134 b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
135 google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
136 if (b->avail() != 0) {
137 // Direct future allocations to this block.
138 google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
139 }
140}
141
142void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
143 Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
144 node->elem = elem;
145 node->cleanup = cleanup;
146 node->next = reinterpret_cast<Node*>(
147 google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
148 reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
149}
150
151void* Arena::AllocateAligned(size_t n) {
152 // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
153 n = (n + 7) & -8;
154
155 // If this thread already owns a block in this arena then try to use that.
156 // This fast path optimizes the case where multiple threads allocate from the
157 // same arena.
Feng Xiao8d5d7cc2014-12-09 17:05:10 -0800158 if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
159 thread_cache().last_block_used_ != NULL) {
160 if (thread_cache().last_block_used_->avail() < n) {
Feng Xiao6ef984a2014-11-10 17:34:54 -0800161 return SlowAlloc(n);
162 }
Feng Xiao8d5d7cc2014-12-09 17:05:10 -0800163 return AllocFromBlock(thread_cache().last_block_used_, n);
Feng Xiao6ef984a2014-11-10 17:34:54 -0800164 }
165
166 // Check whether we own the last accessed block on this arena.
167 // This fast path optimizes the case where a single thread uses multiple
168 // arenas.
Feng Xiao8d5d7cc2014-12-09 17:05:10 -0800169 void* me = &thread_cache();
Feng Xiao6ef984a2014-11-10 17:34:54 -0800170 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
171 if (!b || b->owner != me || b->avail() < n) {
172 // If the next block to allocate from is the first block, try to claim it
173 // for this thread.
174 if (!owns_first_block_ && b->next == NULL) {
175 MutexLock l(&blocks_lock_);
Feng Xiaof157a562014-11-14 11:50:31 -0800176 if (b->owner == &b->owner && b->avail() >= n) {
Feng Xiao6ef984a2014-11-10 17:34:54 -0800177 b->owner = me;
178 SetThreadCacheBlock(b);
179 return AllocFromBlock(b, n);
180 }
181 }
182 return SlowAlloc(n);
183 }
184 return AllocFromBlock(b, n);
185}
186
187void* Arena::AllocFromBlock(Block* b, size_t n) {
188 size_t p = b->pos;
189 b->pos = p + n;
Feng Xiao99aa0f92014-11-20 16:18:53 -0800190#ifdef ADDRESS_SANITIZER
191 ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
192#endif
Feng Xiao6ef984a2014-11-10 17:34:54 -0800193 return reinterpret_cast<char*>(b) + p;
194}
195
196void* Arena::SlowAlloc(size_t n) {
Feng Xiao8d5d7cc2014-12-09 17:05:10 -0800197 void* me = &thread_cache();
Feng Xiao6ef984a2014-11-10 17:34:54 -0800198 Block* b = FindBlock(me); // Find block owned by me.
199 // See if allocation fits in my latest block.
200 if (b != NULL && b->avail() >= n) {
201 SetThreadCacheBlock(b);
202 google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
203 return AllocFromBlock(b, n);
204 }
Jisi Liu885b6122015-02-28 14:51:22 -0800205 b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
Feng Xiao6ef984a2014-11-10 17:34:54 -0800206 AddBlock(b);
207 if (b->owner == me) { // If this block can be reused (see NewBlock()).
208 SetThreadCacheBlock(b);
209 }
210 return reinterpret_cast<char*>(b) + kHeaderSize;
211}
212
Jisi Liu885b6122015-02-28 14:51:22 -0800213uint64 Arena::SpaceAllocated() const {
214 uint64 space_allocated = 0;
215 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
216 while (b != NULL) {
217 space_allocated += (b->size);
218 b = b->next;
219 }
220 return space_allocated;
221}
222
Feng Xiao6ef984a2014-11-10 17:34:54 -0800223uint64 Arena::SpaceUsed() const {
224 uint64 space_used = 0;
225 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
226 while (b != NULL) {
Jisi Liu885b6122015-02-28 14:51:22 -0800227 space_used += (b->pos - kHeaderSize);
Feng Xiao6ef984a2014-11-10 17:34:54 -0800228 b = b->next;
229 }
230 return space_used;
231}
232
Feng Xiao6ef984a2014-11-10 17:34:54 -0800233uint64 Arena::FreeBlocks() {
Jisi Liu885b6122015-02-28 14:51:22 -0800234 uint64 space_allocated = 0;
Feng Xiao6ef984a2014-11-10 17:34:54 -0800235 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
236 Block* first_block = NULL;
237 while (b != NULL) {
Jisi Liu885b6122015-02-28 14:51:22 -0800238 space_allocated += (b->size);
Feng Xiao6ef984a2014-11-10 17:34:54 -0800239 Block* next = b->next;
240 if (next != NULL) {
Jisi Liu885b6122015-02-28 14:51:22 -0800241 options_.block_dealloc(b, b->size);
Feng Xiao6ef984a2014-11-10 17:34:54 -0800242 } else {
243 if (owns_first_block_) {
Jisi Liu885b6122015-02-28 14:51:22 -0800244 options_.block_dealloc(b, b->size);
Feng Xiao6ef984a2014-11-10 17:34:54 -0800245 } else {
246 // User passed in the first block, skip free'ing the memory.
247 first_block = b;
248 }
249 }
250 b = next;
251 }
252 blocks_ = 0;
253 hint_ = 0;
254 if (!owns_first_block_) {
255 // Make the first block that was passed in through ArenaOptions
256 // available for reuse.
257 first_block->pos = kHeaderSize;
258 first_block->owner = &first_block->owner;
259 AddBlock(first_block);
260 }
Jisi Liu885b6122015-02-28 14:51:22 -0800261 return space_allocated;
Feng Xiao6ef984a2014-11-10 17:34:54 -0800262}
263
264void Arena::CleanupList() {
265 Node* head =
266 reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
267 while (head != NULL) {
268 head->cleanup(head->elem);
269 head = head->next;
270 }
271 cleanup_list_ = 0;
272}
273
274Arena::Block* Arena::FindBlock(void* me) {
275 // TODO(sanjay): We might want to keep a separate list with one
276 // entry per thread.
277 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
278 while (b != NULL && b->owner != me) {
279 b = b->next;
280 }
281 return b;
282}
283
284} // namespace protobuf
285} // namespace google