// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// The stub cache is used for megamorphic property accesses.
// It maps (map, name, flags) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.
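//
// A lookup conceptually makes two probes. A minimal sketch of the C++ slow
// path (ignoring the flags check for brevity; the code emitted by
// StubCache::GenerateProbe below mirrors the same sequence):
//
//   int primary_offset = PrimaryOffset(name, flags, map);
//   Entry* p = entry(primary_, primary_offset);
//   if (p->key == name && p->map == map) return p->value;
//   int secondary_offset = SecondaryOffset(name, flags, primary_offset);
//   Entry* s = entry(secondary_, secondary_offset);
//   if (s->key == name && s->map == map) return s->value;
//   return NULL;  // Miss: the caller falls back to the runtime.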


class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Code* value;
    Map* map;
  };

  void Initialize();
  // Access the cache for the entry at hash(name, map).
  Code* Set(Name* name, Map* map, Code* code);
  Code* Get(Name* name, Map* map, Code::Flags flags);
  // Clear the lookup table (at mark-compact collection).
  void Clear();
  // Collect all maps that match the name and flags.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Code::Flags flags, Handle<Context> native_context,
                           Zone* zone);
  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers; set them to no_reg if not needed.
  void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
                     Code::Flags flags, Register receiver, Register name,
                     Register scratch, Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);
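
  // A typical call site in an IC's megamorphic code generator looks roughly
  // like this (an illustrative sketch only; actual register assignments and
  // the chosen Code::Kind vary per architecture and IC):
  //
  //   isolate->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
  //                                        receiver, name, scratch, extra);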

  enum Table { kPrimary, kSecondary };

  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the hash field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;
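  // For example, if kHashShift were 2 (an illustrative value; the real
  // constant is defined by Name), the two low bits of hash_field() would be
  // flag bits rather than hash bits, and every offset computed below would be
  // a multiple of 4, so those flag bits could never select a different entry.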

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

  static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) {
    return PrimaryOffset(name, flags, map);
  }

  static int SecondaryOffsetForTesting(Name* name, Code::Flags flags,
                                       int seed) {
    return SecondaryOffset(name, flags, seed);
  }

  // The constructor is made public only for the purposes of testing.
  explicit StubCache(Isolate* isolate);

 private:
  // The stub cache has a primary and secondary level. The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches. Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is fairly clear and simple: any existing entry in the
  // primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.
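  //
  // A sketch of what Set does under this strategy (simplified; the real
  // implementation also avoids retiring the sentinel used for empty entries):
  //
  //   int primary_offset = PrimaryOffset(name, flags, map);
  //   Entry* primary = entry(primary_, primary_offset);
  //   if (/* *primary holds a live handler */) {
  //     // Rehash the displaced entry with its own name/flags/map and
  //     // retire it to the secondary table.
  //     *entry(secondary_, SecondaryOffset(...)) = *primary;
  //   }
  //   primary->key = name;
  //   primary->value = code;
  //   primary->map = map;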

  // Hash algorithm for the primary table. This algorithm is replicated in
  // assembler for every architecture. Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4 GB (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // We always set the in_loop bit to zero when generating the lookup code,
    // so do it here too so that the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    // Base the offset on a simple combination of name, flags, and map.
    uint32_t key = (map_low32bits + field) ^ iflags;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
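
  // Numerically, assuming kCacheIndexShift == 2 for illustration: the mask is
  // ((1 << 11) - 1) << 2 == 0x1FFC, so PrimaryOffset returns one of 2048
  // distinct offsets, each a multiple of 4.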

  // Hash algorithm for the secondary table. This algorithm is replicated in
  // assembler for every architecture. Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    // We always set the in_loop bit to zero when generating the lookup code,
    // so do it here too so that the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    uint32_t key = (seed - name_low32bits) + iflags;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }
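
  // Worked example with illustrative numbers, again taking
  // kCacheIndexShift == 2: for seed == 0x0AB0, name_low32bits == 0x1234 and
  // iflags == 0, key == 0x0AB0 - 0x1234 == 0xFFFFF87C (mod 2^32); masking
  // with ((512 - 1) << 2) == 0x7FC yields offset 0x07C.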

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code. We generate a hash code that already
  // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
  // of sizeof(Entry). This makes it easier to avoid making mistakes
  // in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
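
  // For instance, on a 64-bit build sizeof(Entry) is 24 (three pointers);
  // assuming kHashShift == 2 for illustration, multiplier == 24 >> 2 == 6,
  // so offset 8 (table index 2) maps to byte offset 8 * 6 == 48, the start
  // of the third entry.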

  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_