// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 27 | |
| 28 | #ifndef V8_HEAP_INL_H_ |
| 29 | #define V8_HEAP_INL_H_ |
| 30 | |
| 31 | #include "log.h" |
| 32 | #include "v8-counters.h" |
| 33 | |
| 34 | namespace v8 { namespace internal { |
| 35 | |
// Returns the largest object size that a paged space can hold; bigger
// objects must be allocated in large object space.
int Heap::MaxHeapObjectSize() {
  return Page::kMaxHeapObjectSize;
}
| 39 | |
| 40 | |
// Allocates a symbol from UTF-8 string data.  If an external symbol
// callback is installed the symbol is created externally; otherwise the
// bytes are wrapped in a UTF-8 decoding buffer and an internal symbol is
// allocated.  'chars' is the character count (not the byte count);
// NOTE(review): 'length_field' appears to be the precomputed string
// length field value -- confirm against the String object layout.
Object* Heap::AllocateSymbol(Vector<const char> str,
                             int chars,
                             uint32_t length_field) {
  if (global_external_symbol_callback_) {
    return AllocateExternalSymbol(str, chars);
  }
  // Internal path: decode the raw bytes as UTF-8.
  unibrow::Utf8InputBuffer<> buffer(str.start(),
                                    static_cast<unsigned>(str.length()));
  return AllocateInternalSymbol(&buffer, chars, length_field);
}
| 51 | |
| 52 | |
ager@chromium.org | 9258b6b | 2008-09-11 09:11:10 +0000 | [diff] [blame] | 53 | Object* Heap::AllocateRaw(int size_in_bytes, |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 54 | AllocationSpace space, |
| 55 | AllocationSpace retry_space) { |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 56 | ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 57 | ASSERT(space != NEW_SPACE || |
| 58 | retry_space == OLD_POINTER_SPACE || |
| 59 | retry_space == OLD_DATA_SPACE); |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 60 | #ifdef DEBUG |
| 61 | if (FLAG_gc_interval >= 0 && |
| 62 | !disallow_allocation_failure_ && |
| 63 | Heap::allocation_timeout_-- <= 0) { |
| 64 | return Failure::RetryAfterGC(size_in_bytes, space); |
| 65 | } |
| 66 | Counters::objs_since_last_full.Increment(); |
| 67 | Counters::objs_since_last_young.Increment(); |
| 68 | #endif |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 69 | Object* result; |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 70 | if (NEW_SPACE == space) { |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 71 | result = new_space_.AllocateRaw(size_in_bytes); |
| 72 | if (always_allocate() && result->IsFailure()) { |
| 73 | space = retry_space; |
| 74 | } else { |
| 75 | return result; |
| 76 | } |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 77 | } |
| 78 | |
ager@chromium.org | 9258b6b | 2008-09-11 09:11:10 +0000 | [diff] [blame] | 79 | if (OLD_POINTER_SPACE == space) { |
| 80 | result = old_pointer_space_->AllocateRaw(size_in_bytes); |
| 81 | } else if (OLD_DATA_SPACE == space) { |
| 82 | result = old_data_space_->AllocateRaw(size_in_bytes); |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 83 | } else if (CODE_SPACE == space) { |
| 84 | result = code_space_->AllocateRaw(size_in_bytes); |
| 85 | } else if (LO_SPACE == space) { |
| 86 | result = lo_space_->AllocateRaw(size_in_bytes); |
| 87 | } else { |
| 88 | ASSERT(MAP_SPACE == space); |
| 89 | result = map_space_->AllocateRaw(size_in_bytes); |
| 90 | } |
| 91 | if (result->IsFailure()) old_gen_exhausted_ = true; |
| 92 | return result; |
| 93 | } |
| 94 | |
| 95 | |
| 96 | Object* Heap::NumberFromInt32(int32_t value) { |
| 97 | if (Smi::IsValid(value)) return Smi::FromInt(value); |
| 98 | // Bypass NumberFromDouble to avoid various redundant checks. |
| 99 | return AllocateHeapNumber(FastI2D(value)); |
| 100 | } |
| 101 | |
| 102 | |
| 103 | Object* Heap::NumberFromUint32(uint32_t value) { |
| 104 | if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) { |
| 105 | return Smi::FromInt((int32_t)value); |
| 106 | } |
| 107 | // Bypass NumberFromDouble to avoid various redundant checks. |
| 108 | return AllocateHeapNumber(FastUI2D(value)); |
| 109 | } |
| 110 | |
| 111 | |
| 112 | Object* Heap::AllocateRawMap(int size_in_bytes) { |
| 113 | #ifdef DEBUG |
| 114 | Counters::objs_since_last_full.Increment(); |
| 115 | Counters::objs_since_last_young.Increment(); |
| 116 | #endif |
| 117 | Object* result = map_space_->AllocateRaw(size_in_bytes); |
| 118 | if (result->IsFailure()) old_gen_exhausted_ = true; |
| 119 | return result; |
| 120 | } |
| 121 | |
| 122 | |
// Returns whether object lies in new space (either semispace).
bool Heap::InNewSpace(Object* object) {
  return new_space_.Contains(object);
}
| 126 | |
| 127 | |
// Returns whether object lies in the from semispace of new space.
bool Heap::InFromSpace(Object* object) {
  return new_space_.FromSpaceContains(object);
}
| 131 | |
| 132 | |
// Returns whether object lies in the to semispace of new space.
bool Heap::InToSpace(Object* object) {
  return new_space_.ToSpaceContains(object);
}
| 136 | |
| 137 | |
| 138 | bool Heap::ShouldBePromoted(Address old_address, int object_size) { |
| 139 | // An object should be promoted if: |
| 140 | // - the object has survived a scavenge operation or |
| 141 | // - to space is already 25% full. |
kasperl@chromium.org | 5a8ca6c | 2008-10-23 13:57:19 +0000 | [diff] [blame] | 142 | return old_address < new_space_.age_mark() |
| 143 | || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2); |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 144 | } |
| 145 | |
| 146 | |
// Records a pointer write at address + offset in the remembered set.
// Writes into new space need no remembered-set entry.
void Heap::RecordWrite(Address address, int offset) {
  if (new_space_.Contains(address)) return;
  ASSERT(!new_space_.FromSpaceContains(address));
  SLOW_ASSERT(Contains(address + offset));
  Page::SetRSet(address, offset);
}
| 153 | |
| 154 | |
ager@chromium.org | 9258b6b | 2008-09-11 09:11:10 +0000 | [diff] [blame] | 155 | OldSpace* Heap::TargetSpace(HeapObject* object) { |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 156 | InstanceType type = object->map()->instance_type(); |
| 157 | AllocationSpace space = TargetSpaceId(type); |
| 158 | return (space == OLD_POINTER_SPACE) |
| 159 | ? old_pointer_space_ |
| 160 | : old_data_space_; |
| 161 | } |
| 162 | |
| 163 | |
| 164 | AllocationSpace Heap::TargetSpaceId(InstanceType type) { |
ager@chromium.org | 9258b6b | 2008-09-11 09:11:10 +0000 | [diff] [blame] | 165 | // Heap numbers and sequential strings are promoted to old data space, all |
| 166 | // other object types are promoted to old pointer space. We do not use |
kasper.lund | 7276f14 | 2008-07-30 08:49:36 +0000 | [diff] [blame] | 167 | // object->IsHeapNumber() and object->IsSeqString() because we already |
| 168 | // know that object has the heap object tag. |
kasper.lund | 7276f14 | 2008-07-30 08:49:36 +0000 | [diff] [blame] | 169 | ASSERT((type != CODE_TYPE) && (type != MAP_TYPE)); |
| 170 | bool has_pointers = |
| 171 | type != HEAP_NUMBER_TYPE && |
| 172 | (type >= FIRST_NONSTRING_TYPE || |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 173 | (type & kStringRepresentationMask) != kSeqStringTag); |
| 174 | return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE; |
kasper.lund | 7276f14 | 2008-07-30 08:49:36 +0000 | [diff] [blame] | 175 | } |
| 176 | |
| 177 | |
kasperl@chromium.org | 5a8ca6c | 2008-10-23 13:57:19 +0000 | [diff] [blame] | 178 | void Heap::CopyBlock(Object** dst, Object** src, int byte_size) { |
| 179 | ASSERT(IsAligned(byte_size, kPointerSize)); |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 180 | |
kasperl@chromium.org | 5a8ca6c | 2008-10-23 13:57:19 +0000 | [diff] [blame] | 181 | // Use block copying memcpy if the segment we're copying is |
| 182 | // enough to justify the extra call/setup overhead. |
| 183 | static const int kBlockCopyLimit = 16 * kPointerSize; |
| 184 | |
| 185 | if (byte_size >= kBlockCopyLimit) { |
| 186 | memcpy(dst, src, byte_size); |
| 187 | } else { |
| 188 | int remaining = byte_size / kPointerSize; |
| 189 | do { |
| 190 | remaining--; |
| 191 | *dst++ = *src++; |
| 192 | } while (remaining > 0); |
| 193 | } |
| 194 | } |
| 195 | |
| 196 | |
| 197 | Object* Heap::GetKeyedLookupCache() { |
| 198 | if (keyed_lookup_cache()->IsUndefined()) { |
| 199 | Object* obj = LookupCache::Allocate(4); |
| 200 | if (obj->IsFailure()) return obj; |
| 201 | keyed_lookup_cache_ = obj; |
| 202 | } |
| 203 | return keyed_lookup_cache(); |
| 204 | } |
| 205 | |
| 206 | |
// Installs a new keyed lookup cache.
void Heap::SetKeyedLookupCache(LookupCache* cache) {
  keyed_lookup_cache_ = cache;
}
| 210 | |
| 211 | |
// Drops the keyed lookup cache; it is reallocated on next use by
// GetKeyedLookupCache.
void Heap::ClearKeyedLookupCache() {
  keyed_lookup_cache_ = undefined_value();
}
| 215 | |
| 216 | |
// With --gc-greedy (debug checking), runs a garbage collection before the
// allocation attempt to flush out GC-unsafe call sites early.
#define GC_GREEDY_CHECK() \
  ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 219 | |
kasperl@chromium.org | 9bbf968 | 2008-10-30 11:53:07 +0000 | [diff] [blame] | 220 | |
// Calls the FUNCTION_CALL function and retries it up to three times
// to guarantee that any allocations performed during the call will
// succeed if there's enough memory.  The retry sequence is:
//   1. plain call; on a RetryAfterGC failure, collect garbage in the
//      requested space and
//   2. call again; on another RetryAfterGC failure, perform a
//      last-resort full collection and
//   3. call once more inside an AlwaysAllocateScope.
// Any out-of-memory failure aborts the process; the step is encoded in
// the FatalProcessOutOfMemory message.

// Warning: Do not use the identifiers __object__ or __scope__ in a
// call to this macro.

#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)         \
  do {                                                                    \
    GC_GREEDY_CHECK();                                                    \
    Object* __object__ = FUNCTION_CALL;                                   \
    if (!__object__->IsFailure()) return RETURN_VALUE;                    \
    if (__object__->IsOutOfMemoryFailure()) {                             \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0");      \
    }                                                                     \
    if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY;               \
    Heap::CollectGarbage(Failure::cast(__object__)->requested(),          \
                         Failure::cast(__object__)->allocation_space());  \
    __object__ = FUNCTION_CALL;                                           \
    if (!__object__->IsFailure()) return RETURN_VALUE;                    \
    if (__object__->IsOutOfMemoryFailure()) {                             \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1");      \
    }                                                                     \
    if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY;               \
    Counters::gc_last_resort_from_handles.Increment();                    \
    Heap::CollectAllGarbage();                                            \
    {                                                                     \
      AlwaysAllocateScope __scope__;                                      \
      __object__ = FUNCTION_CALL;                                         \
    }                                                                     \
    if (!__object__->IsFailure()) return RETURN_VALUE;                    \
    if (__object__->IsOutOfMemoryFailure()) {                             \
      /* TODO(1181417): Fix this. */                                      \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2");      \
    }                                                                     \
    ASSERT(!__object__->IsRetryAfterGC());                                \
    return RETURN_EMPTY;                                                  \
  } while (false)
| 259 | |
| 260 | |
// Like CALL_AND_RETRY, but wraps a successful result in a Handle<TYPE>
// and returns an empty handle on failure.
#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)        \
  CALL_AND_RETRY(FUNCTION_CALL,                        \
                 Handle<TYPE>(TYPE::cast(__object__)), \
                 Handle<TYPE>())
| 265 | |
| 266 | |
// Like CALL_AND_RETRY for calls whose result is discarded; the enclosing
// function returns void.
#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
  CALL_AND_RETRY(FUNCTION_CALL, , )
kasperl@chromium.org | b912362 | 2008-09-17 14:05:56 +0000 | [diff] [blame] | 269 | |
| 270 | |
christian.plesner.hansen | 43d26ec | 2008-07-03 15:10:15 +0000 | [diff] [blame] | 271 | #ifdef DEBUG |
| 272 | |
| 273 | inline bool Heap::allow_allocation(bool new_state) { |
| 274 | bool old = allocation_allowed_; |
| 275 | allocation_allowed_ = new_state; |
| 276 | return old; |
| 277 | } |
| 278 | |
| 279 | #endif |
| 280 | |
| 281 | |
| 282 | } } // namespace v8::internal |
| 283 | |
| 284 | #endif // V8_HEAP_INL_H_ |