// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/layout-descriptor.h"

#include <sstream>

#include "src/base/bits.h"
#include "src/handles-inl.h"

using v8::base::bits::CountTrailingZeros32;

namespace v8 {
namespace internal {

| 17 | Handle<LayoutDescriptor> LayoutDescriptor::New( |
| 18 | Handle<Map> map, Handle<DescriptorArray> descriptors, int num_descriptors) { |
| 19 | Isolate* isolate = descriptors->GetIsolate(); |
| 20 | if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate); |
| 21 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 22 | int layout_descriptor_length = |
| 23 | CalculateCapacity(*map, *descriptors, num_descriptors); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 24 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 25 | if (layout_descriptor_length == 0) { |
| 26 | // No double fields were found, use fast pointer layout. |
| 27 | return handle(FastPointerLayout(), isolate); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 28 | } |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 29 | |
| 30 | // Initially, layout descriptor corresponds to an object with all fields |
| 31 | // tagged. |
| 32 | Handle<LayoutDescriptor> layout_descriptor_handle = |
| 33 | LayoutDescriptor::New(isolate, layout_descriptor_length); |
| 34 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 35 | LayoutDescriptor* layout_descriptor = Initialize( |
| 36 | *layout_descriptor_handle, *map, *descriptors, num_descriptors); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 37 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 38 | return handle(layout_descriptor, isolate); |
| 39 | } |
| 40 | |
| 41 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 42 | Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend( |
| 43 | Handle<Map> map, PropertyDetails details) { |
| 44 | DCHECK(map->owns_descriptors()); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 45 | Isolate* isolate = map->GetIsolate(); |
| 46 | Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(), |
| 47 | isolate); |
| 48 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 49 | if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) { |
| 50 | DCHECK(details.location() != kField || |
| 51 | layout_descriptor->IsTagged(details.field_index())); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 52 | return layout_descriptor; |
| 53 | } |
| 54 | int field_index = details.field_index(); |
| 55 | layout_descriptor = LayoutDescriptor::EnsureCapacity( |
| 56 | isolate, layout_descriptor, field_index + details.field_width_in_words()); |
| 57 | |
| 58 | DisallowHeapAllocation no_allocation; |
| 59 | LayoutDescriptor* layout_desc = *layout_descriptor; |
| 60 | layout_desc = layout_desc->SetRawData(field_index); |
| 61 | if (details.field_width_in_words() > 1) { |
| 62 | layout_desc = layout_desc->SetRawData(field_index + 1); |
| 63 | } |
| 64 | return handle(layout_desc, isolate); |
| 65 | } |
| 66 | |
| 67 | |
| 68 | Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull( |
| 69 | Handle<Map> map, PropertyDetails details, |
| 70 | Handle<LayoutDescriptor> full_layout_descriptor) { |
| 71 | DisallowHeapAllocation no_allocation; |
| 72 | LayoutDescriptor* layout_descriptor = map->layout_descriptor(); |
| 73 | if (layout_descriptor->IsSlowLayout()) { |
| 74 | return full_layout_descriptor; |
| 75 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 76 | if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) { |
| 77 | DCHECK(details.location() != kField || |
| 78 | layout_descriptor->IsTagged(details.field_index())); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 79 | return handle(layout_descriptor, map->GetIsolate()); |
| 80 | } |
| 81 | int field_index = details.field_index(); |
| 82 | int new_capacity = field_index + details.field_width_in_words(); |
| 83 | if (new_capacity > layout_descriptor->capacity()) { |
| 84 | // Current map's layout descriptor runs out of space, so use the full |
| 85 | // layout descriptor. |
| 86 | return full_layout_descriptor; |
| 87 | } |
| 88 | |
| 89 | layout_descriptor = layout_descriptor->SetRawData(field_index); |
| 90 | if (details.field_width_in_words() > 1) { |
| 91 | layout_descriptor = layout_descriptor->SetRawData(field_index + 1); |
| 92 | } |
| 93 | return handle(layout_descriptor, map->GetIsolate()); |
| 94 | } |
| 95 | |
| 96 | |
| 97 | Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity( |
| 98 | Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor, |
| 99 | int new_capacity) { |
| 100 | int old_capacity = layout_descriptor->capacity(); |
| 101 | if (new_capacity <= old_capacity) { |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 102 | return layout_descriptor; |
| 103 | } |
| 104 | Handle<LayoutDescriptor> new_layout_descriptor = |
| 105 | LayoutDescriptor::New(isolate, new_capacity); |
| 106 | DCHECK(new_layout_descriptor->IsSlowLayout()); |
| 107 | |
| 108 | if (layout_descriptor->IsSlowLayout()) { |
| 109 | memcpy(new_layout_descriptor->DataPtr(), layout_descriptor->DataPtr(), |
| 110 | layout_descriptor->DataSize()); |
| 111 | return new_layout_descriptor; |
| 112 | } else { |
| 113 | // Fast layout. |
| 114 | uint32_t value = |
| 115 | static_cast<uint32_t>(Smi::cast(*layout_descriptor)->value()); |
| 116 | new_layout_descriptor->set(0, value); |
| 117 | return new_layout_descriptor; |
| 118 | } |
| 119 | } |
| 120 | |
| 121 | |
| 122 | bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length, |
| 123 | int* out_sequence_length) { |
| 124 | DCHECK(max_sequence_length > 0); |
| 125 | if (IsFastPointerLayout()) { |
| 126 | *out_sequence_length = max_sequence_length; |
| 127 | return true; |
| 128 | } |
| 129 | |
| 130 | int layout_word_index; |
| 131 | int layout_bit_index; |
| 132 | |
| 133 | if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) { |
| 134 | // Out of bounds queries are considered tagged. |
| 135 | *out_sequence_length = max_sequence_length; |
| 136 | return true; |
| 137 | } |
| 138 | uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index; |
| 139 | |
| 140 | uint32_t value = IsSlowLayout() |
| 141 | ? get_scalar(layout_word_index) |
| 142 | : static_cast<uint32_t>(Smi::cast(this)->value()); |
| 143 | |
| 144 | bool is_tagged = (value & layout_mask) == 0; |
| 145 | if (!is_tagged) value = ~value; // Count set bits instead of cleared bits. |
| 146 | value = value & ~(layout_mask - 1); // Clear bits we are not interested in. |
| 147 | int sequence_length = CountTrailingZeros32(value) - layout_bit_index; |
| 148 | |
| 149 | if (layout_bit_index + sequence_length == kNumberOfBits) { |
| 150 | // This is a contiguous sequence till the end of current word, proceed |
| 151 | // counting in the subsequent words. |
| 152 | if (IsSlowLayout()) { |
| 153 | int len = length(); |
| 154 | ++layout_word_index; |
| 155 | for (; layout_word_index < len; layout_word_index++) { |
| 156 | value = get_scalar(layout_word_index); |
| 157 | bool cur_is_tagged = (value & 1) == 0; |
| 158 | if (cur_is_tagged != is_tagged) break; |
| 159 | if (!is_tagged) value = ~value; // Count set bits instead. |
| 160 | int cur_sequence_length = CountTrailingZeros32(value); |
| 161 | sequence_length += cur_sequence_length; |
| 162 | if (sequence_length >= max_sequence_length) break; |
| 163 | if (cur_sequence_length != kNumberOfBits) break; |
| 164 | } |
| 165 | } |
| 166 | if (is_tagged && (field_index + sequence_length == capacity())) { |
| 167 | // The contiguous sequence of tagged fields lasts till the end of the |
| 168 | // layout descriptor which means that all the fields starting from |
| 169 | // field_index are tagged. |
| 170 | sequence_length = std::numeric_limits<int>::max(); |
| 171 | } |
| 172 | } |
| 173 | *out_sequence_length = Min(sequence_length, max_sequence_length); |
| 174 | return is_tagged; |
| 175 | } |
| 176 | |
| 177 | |
| 178 | Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate, |
| 179 | int length) { |
| 180 | return New(isolate, length); |
| 181 | } |
| 182 | |
| 183 | |
| 184 | LayoutDescriptor* LayoutDescriptor::SetTaggedForTesting(int field_index, |
| 185 | bool tagged) { |
| 186 | return SetTagged(field_index, tagged); |
| 187 | } |
| 188 | |
| 189 | |
| 190 | bool LayoutDescriptorHelper::IsTagged( |
| 191 | int offset_in_bytes, int end_offset, |
| 192 | int* out_end_of_contiguous_region_offset) { |
| 193 | DCHECK(IsAligned(offset_in_bytes, kPointerSize)); |
| 194 | DCHECK(IsAligned(end_offset, kPointerSize)); |
| 195 | DCHECK(offset_in_bytes < end_offset); |
| 196 | if (all_fields_tagged_) { |
| 197 | *out_end_of_contiguous_region_offset = end_offset; |
| 198 | DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset); |
| 199 | return true; |
| 200 | } |
| 201 | int max_sequence_length = (end_offset - offset_in_bytes) / kPointerSize; |
| 202 | int field_index = Max(0, (offset_in_bytes - header_size_) / kPointerSize); |
| 203 | int sequence_length; |
| 204 | bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length, |
| 205 | &sequence_length); |
| 206 | DCHECK(sequence_length > 0); |
| 207 | if (offset_in_bytes < header_size_) { |
| 208 | // Object headers do not contain non-tagged fields. Check if the contiguous |
| 209 | // region continues after the header. |
| 210 | if (tagged) { |
| 211 | // First field is tagged, calculate end offset from there. |
| 212 | *out_end_of_contiguous_region_offset = |
| 213 | header_size_ + sequence_length * kPointerSize; |
| 214 | |
| 215 | } else { |
| 216 | *out_end_of_contiguous_region_offset = header_size_; |
| 217 | } |
| 218 | DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset); |
| 219 | return true; |
| 220 | } |
| 221 | *out_end_of_contiguous_region_offset = |
| 222 | offset_in_bytes + sequence_length * kPointerSize; |
| 223 | DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset); |
| 224 | return tagged; |
| 225 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 226 | |
| 227 | |
| 228 | LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map, |
| 229 | DescriptorArray* descriptors, |
| 230 | int num_descriptors) { |
| 231 | DisallowHeapAllocation no_allocation; |
| 232 | // Fast mode descriptors are never shared and therefore always fully |
| 233 | // correspond to their map. |
| 234 | if (!IsSlowLayout()) return this; |
| 235 | |
| 236 | int layout_descriptor_length = |
| 237 | CalculateCapacity(map, descriptors, num_descriptors); |
| 238 | // It must not become fast-mode descriptor here, because otherwise it has to |
| 239 | // be fast pointer layout descriptor already but it's is slow mode now. |
| 240 | DCHECK_LT(kSmiValueSize, layout_descriptor_length); |
| 241 | |
| 242 | // Trim, clean and reinitialize this slow-mode layout descriptor. |
| 243 | int array_length = GetSlowModeBackingStoreLength(layout_descriptor_length); |
| 244 | int current_length = length(); |
| 245 | if (current_length != array_length) { |
| 246 | DCHECK_LT(array_length, current_length); |
| 247 | int delta = current_length - array_length; |
| 248 | heap->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(this, delta); |
| 249 | } |
| 250 | memset(DataPtr(), 0, DataSize()); |
| 251 | LayoutDescriptor* layout_descriptor = |
| 252 | Initialize(this, map, descriptors, num_descriptors); |
| 253 | DCHECK_EQ(this, layout_descriptor); |
| 254 | return layout_descriptor; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 255 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 256 | |
| 257 | |
| 258 | bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) { |
| 259 | if (FLAG_unbox_double_fields) { |
| 260 | DescriptorArray* descriptors = map->instance_descriptors(); |
| 261 | int nof_descriptors = map->NumberOfOwnDescriptors(); |
| 262 | int last_field_index = 0; |
| 263 | for (int i = 0; i < nof_descriptors; i++) { |
| 264 | PropertyDetails details = descriptors->GetDetails(i); |
| 265 | if (details.location() != kField) continue; |
| 266 | FieldIndex field_index = FieldIndex::ForDescriptor(map, i); |
| 267 | bool tagged_expected = |
| 268 | !field_index.is_inobject() || !details.representation().IsDouble(); |
| 269 | for (int bit = 0; bit < details.field_width_in_words(); bit++) { |
| 270 | bool tagged_actual = IsTagged(details.field_index() + bit); |
| 271 | DCHECK_EQ(tagged_expected, tagged_actual); |
| 272 | if (tagged_actual != tagged_expected) return false; |
| 273 | } |
| 274 | last_field_index = |
| 275 | Max(last_field_index, |
| 276 | details.field_index() + details.field_width_in_words()); |
| 277 | } |
| 278 | if (check_tail) { |
| 279 | int n = capacity(); |
| 280 | for (int i = last_field_index; i < n; i++) { |
| 281 | DCHECK(IsTagged(i)); |
| 282 | } |
| 283 | } |
| 284 | } |
| 285 | return true; |
| 286 | } |
}  // namespace internal
}  // namespace v8