// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

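// These tests force an out-of-memory condition during a full GC so that
// evacuation of a manually selected candidate page is aborted either fully or
// partway through, and then verify the state such an aborted page is left in.
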
namespace v8 {
namespace internal {

static void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->markbits()->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}


HEAP_TEST(CompactionFullAbortedPage) {
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a fully aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->MarkNeverAllocateForTesting();
    }
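    // With every pre-existing page marked as never-allocate, the objects
    // created below are forced onto the pages the test adds explicitly.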

    {
      HandleScope scope2(isolate);
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

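      // With OOM forced, the collector cannot get memory to evacuate into,
      // so compaction of the candidate page is expected to abort. Sweeping is
      // finished explicitly because concurrent sweeping is disabled.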
      heap->set_force_oom(true);
      heap->CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPage) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->MarkNeverAllocateForTesting();
    }

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding(
            heap, object_size * num_objects, TENURED, object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

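        // Only part of the space needed to evacuate the candidate page is
        // still available, so with OOM forced the evacuation should fail
        // partway through and leave the remaining objects where they are.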
        heap->set_force_oom(true);
        heap->CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page pointers
  // get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);

    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->MarkNeverAllocateForTesting();
    }

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
    }
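    // The arrays on to_be_aborted_page are now chained through their slot 0,
    // with the chain rooted in root_array, so the only incoming pointers to
    // most of them are intra-page pointers from their neighbors.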

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      heap->CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);
    PageIterator it(heap->old_space());
    while (it.has_next()) {
      it.next()->MarkNeverAllocateForTesting();
    }

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(heap->InNewSpace(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
    }
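    // Slot 1 of the first array on the aborted page now holds a pointer into
    // new space, so the store buffer is expected to have an entry for that
    // slot; aborting the page must not leave a stale entry behind.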

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      heap->CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(!heap->InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a new
      // space object.
      Address broken_address = holder->address() + 2 * kPointerSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), 8);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed size array, which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromAddress(string->address()) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space
      // and the following scavenge will crash.
      heap->CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace internal
}  // namespace v8