// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

namespace v8 {
namespace internal {

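// Helper that verifies an aborted page has been returned to a clean, reusable
// state after compaction was rolled back on it.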
static void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared.
  // 2) The page is no longer marked as an evacuation candidate.
  // 3) The page is no longer marked as having aborted compaction.
  CHECK(page->markbits()->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}


HEAP_TEST(CompactionFullAbortedPage) {
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reliably reach the state of a fully aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
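    // Prevent allocation on all existing pages so that the objects allocated
    // below end up on the freshly added page.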
    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
    }

    {
      HandleScope scope2(isolate);
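      // Add a fresh page and fill it completely with objects, then force it
      // to become an evacuation candidate.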
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

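      // Force OOM on the next allocations so that evacuating the candidate
      // page fails and compaction is aborted for the entire page.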
      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPage) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reliably reach the state of a partially aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
    }

    {
      HandleScope scope2(isolate);
      // Fill a fresh page with objects of size {object_size} (the last
      // object's size is adjusted to completely fill the page).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding(
            heap, object_size * num_objects, TENURED, object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

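        // With OOM forced, evacuation can only migrate the objects that fit
        // on {page_to_fill}; the remaining objects stay behind, leaving the
        // candidate page partially aborted.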
        heap->set_force_oom(true);
        heap->CollectAllGarbage();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all subsequent objects must
          // still be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted-page
  // pointers get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reliably reach the state of a partially aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);

    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
    }

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (the last
      // object's size is adjusted to completely fill the page).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
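      // Link the objects into a chain via slot 0 (from last to first) and
      // anchor the chain in {root_array} so it survives the temporary scope.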
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      heap->CollectAllGarbage();

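      // Because the objects were linked through slot 0, following the chain
      // below also verifies that pointers into the aborted page were updated
      // correctly.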
      // The following check makes sure that we compacted "some" objects,
      // while leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we migrated at least one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the freed space, where their
  // payload looks like a valid new-space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reliably reach the state of a partially aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);
    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
    }

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (the last
      // object's size is adjusted to completely fill the page).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
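      // Point slot 1 of the first object on the candidate page to a
      // new-space object; this creates the store buffer entry whose handling
      // after the abort is under test.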
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(heap->InNewSpace(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // The following check makes sure that we compacted "some" objects,
      // while leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(!heap->InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we migrated at least one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

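      // Reproduce the dangerous situation: the first object definitely
      // migrated, so its former slot 1 is now free space on the aborted page.
      // Allocate a string there whose payload mimics a new-space pointer; a
      // stale store buffer entry would make the next scavenge interpret it
      // as a real pointer.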
      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a new
      // space object.
      Address broken_address = holder->address() + 2 * kPointerSize + 1;
      // Convert it to a vector so we can create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed array, which corresponds to the first
        // word in the string. Since the first object definitely migrated we
        // can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromAddress(string->address()) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space
      // and the following scavenge will crash.
      heap->CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace internal
}  // namespace v8