/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "swap_space.h"

#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <numeric>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "thread-inl.h"

namespace art {

// The chunk size by which the swap file is increased and mapped.
static constexpr size_t kMinimumMapSize = 16 * MB;

// When true, Alloc() and Free() cross-check free_by_start_ and free_by_size_
// for consistency. Expensive, so only for debugging.
static constexpr bool kCheckFreeMaps = false;

// Dump the free chunks, grouped by size, to the log. For debugging.
template <typename FreeBySizeSet>
static void DumpFreeMap(const FreeBySizeSet& free_by_size) {
  size_t last_size = static_cast<size_t>(-1);
  for (const auto& entry : free_by_size) {
    if (last_size != entry.first) {
      last_size = entry.first;
      LOG(INFO) << "Size " << last_size;
    }
    LOG(INFO) << "  0x" << std::hex << entry.second->Start()
              << " size=" << std::dec << entry.second->size;
  }
}

// Remove a free chunk from both maps, given its position in the by-size map.
template <typename FreeByStartSet, typename FreeBySizeSet>
static void RemoveChunk(FreeByStartSet* free_by_start,
                        FreeBySizeSet* free_by_size,
                        typename FreeBySizeSet::const_iterator free_by_size_pos) {
  auto free_by_start_pos = free_by_size_pos->second;
  free_by_size->erase(free_by_size_pos);
  free_by_start->erase(free_by_start_pos);
}

// Insert a free chunk into both maps, keeping them consistent.
template <typename FreeByStartSet, typename FreeBySizeSet>
static void InsertChunk(FreeByStartSet* free_by_start,
                        FreeBySizeSet* free_by_size,
                        const SpaceChunk& chunk) {
  DCHECK_NE(chunk.size, 0u);
  auto insert_result = free_by_start->insert(chunk);
  DCHECK(insert_result.second);
  free_by_size->emplace(chunk.size, insert_result.first);
}

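// The two helpers above maintain a dual-index free list: free_by_start_
// orders chunks by start address (so Free() can find adjacent neighbors to
// coalesce), while free_by_size_ orders (size, iterator-into-by-start)
// entries (so Alloc() can do a best-fit lookup). A minimal standalone sketch
// of the same pattern follows; it is illustrative only, not compiled, and
// every name in it is hypothetical rather than part of this file.
#if 0
#include <cstddef>
#include <cstdint>
#include <set>
#include <utility>

struct Chunk {
  uint8_t* ptr;
  size_t size;
  // Order by start address so adjacent free chunks are neighbors in the set.
  bool operator<(const Chunk& other) const { return ptr < other.ptr; }
};
using ByStart = std::set<Chunk>;
using SizeEntry = std::pair<size_t, ByStart::const_iterator>;

struct BySizeCompare {
  bool operator()(const SizeEntry& lhs, const SizeEntry& rhs) const {
    // Primary key is the chunk size; ties are broken by start address,
    // because set iterators themselves have no ordering.
    return lhs.first != rhs.first ? lhs.first < rhs.first
                                  : lhs.second->ptr < rhs.second->ptr;
  }
};
using BySize = std::set<SizeEntry, BySizeCompare>;

// Mirrors InsertChunk(): insert into the by-start set first, then index the
// resulting iterator by size.
void Insert(ByStart* by_start, BySize* by_size, const Chunk& chunk) {
  auto result = by_start->insert(chunk);
  by_size->emplace(chunk.size, result.first);
}

// Mirrors the best-fit lookup in Alloc(): the first entry whose size is at
// least 'wanted'. The empty check matters: begin() of an empty set must not
// be dereferenced by the comparator.
BySize::iterator BestFit(const ByStart& by_start, BySize* by_size, size_t wanted) {
  if (by_start.empty()) return by_size->end();
  return by_size->lower_bound({wanted, by_start.begin()});
}
#endif
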
SwapSpace::SwapSpace(int fd, size_t initial_size)
    : fd_(fd),
      size_(0),
      lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
  // Assume that the file is unlinked, so its storage is reclaimed once fd_ is closed.
  InsertChunk(&free_by_start_, &free_by_size_, NewFileChunk(initial_size));
}

SwapSpace::~SwapSpace() {
  // All arenas are backed by the same file. Just close the descriptor.
  close(fd_);
}

// Consistency check: both free maps must track the same chunks. Returns the
// total number of free bytes.
template <typename FreeByStartSet, typename FreeBySizeSet>
static size_t CollectFree(const FreeByStartSet& free_by_start, const FreeBySizeSet& free_by_size) {
  if (free_by_start.size() != free_by_size.size()) {
    LOG(FATAL) << "Size: " << free_by_start.size() << " vs " << free_by_size.size();
  }

  // Calculate over free_by_size.
  size_t sum1 = 0;
  for (const auto& entry : free_by_size) {
    sum1 += entry.second->size;
  }

  // Calculate over free_by_start.
  size_t sum2 = 0;
  for (const auto& entry : free_by_start) {
    sum2 += entry.size;
  }

  if (sum1 != sum2) {
    LOG(FATAL) << "Sum: " << sum1 << " vs " << sum2;
  }
  return sum1;
}

void* SwapSpace::Alloc(size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  // Check the free list for something that fits.
  // TODO: Smarter implementation. Global biggest chunk, ...
  SpaceChunk old_chunk;
  auto it = free_by_start_.empty()
      ? free_by_size_.end()
      : free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
  if (it != free_by_size_.end()) {
    old_chunk = *it->second;
    RemoveChunk(&free_by_start_, &free_by_size_, it);
  } else {
    // No free chunk is big enough; grow the swap file.
    old_chunk = NewFileChunk(size);
  }

  void* ret = old_chunk.ptr;

  if (old_chunk.size != size) {
    // Insert the remainder.
    SpaceChunk new_chunk = { old_chunk.ptr + size, old_chunk.size - size };
    InsertChunk(&free_by_start_, &free_by_size_, new_chunk);
  }

  return ret;
}

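// A worked example of the path above, with hypothetical numbers for
// illustration only: given a single free chunk {P, 64} and a request for 21
// bytes, size becomes RoundUp(21, 8U) == 24, the lower_bound lookup finds the
// 64-byte chunk and RemoveChunk() drops it from both maps, the caller
// receives P, and the remainder {P + 24, 40} is reinserted. A later Alloc()
// of up to 40 bytes can then be served from the remainder without growing
// the swap file.
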
SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
  size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMinimumMapSize, kPageSize));
  int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
  if (result != 0) {
    PLOG(FATAL) << "Unable to increase swap file.";
  }
  uint8_t* ptr = reinterpret_cast<uint8_t*>(
      mmap(nullptr, next_part, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, size_));
  if (ptr == MAP_FAILED) {
    LOG(ERROR) << "Unable to mmap new swap file chunk.";
    LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
    LOG(ERROR) << "Free list:";
    // Do not re-acquire lock_ here: Alloc() already holds it when it calls
    // NewFileChunk(), and the only other caller is the constructor, which
    // runs before the object is shared.
    DumpFreeMap(free_by_size_);
    LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
    LOG(FATAL) << "Aborting...";
  }
  size_ += next_part;
  SpaceChunk new_chunk = {ptr, next_part};
  maps_.push_back(new_chunk);
  return new_chunk;
#else
  UNUSED(min_size, kMinimumMapSize);
  LOG(FATAL) << "No swap file support on the Mac.";
  UNREACHABLE();
#endif
}

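// NewFileChunk() grows the backing file with ftruncate64() and then maps only
// the new tail, passing the old size as the mmap() offset; earlier mappings
// are left untouched (they are remembered in maps_). Below is a minimal
// standalone sketch of that pattern; it is illustrative only, not compiled,
// uses hypothetical names, and assumes a Linux host where the running size
// stays page-aligned (mmap() requires a page-aligned offset).
#if 0
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>
#include <unistd.h>

// Extends the file behind 'fd' by 'delta' bytes and maps the new region.
// '*file_size' holds the current size and is advanced on success.
uint8_t* GrowFileMapping(int fd, size_t* file_size, size_t delta) {
  if (ftruncate64(fd, *file_size + delta) != 0) {
    return nullptr;  // Could not grow the file.
  }
  void* ptr = mmap(nullptr, delta, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                   static_cast<off_t>(*file_size));  // Map only the new tail.
  if (ptr == MAP_FAILED) {
    return nullptr;
  }
  *file_size += delta;
  return reinterpret_cast<uint8_t*>(ptr);
}
#endif
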
// TODO: Full coalescing.
void SwapSpace::Free(void* ptrV, size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  size_t free_before = 0;
  if (kCheckFreeMaps) {
    free_before = CollectFree(free_by_start_, free_by_size_);
  }

  SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptrV), size };
  auto it = free_by_start_.lower_bound(chunk);
  if (it != free_by_start_.begin()) {
    auto prev = it;
    --prev;
    CHECK_LE(prev->End(), chunk.Start());
    if (prev->End() == chunk.Start()) {
      // Merge *prev with this chunk.
      chunk.size += prev->size;
      chunk.ptr -= prev->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
      // "prev" is invalidated but "it" remains valid.
    }
  }
  if (it != free_by_start_.end()) {
    CHECK_LE(chunk.End(), it->Start());
    if (chunk.End() == it->Start()) {
      // Merge *it with this chunk.
      chunk.size += it->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
      // "it" is invalidated but we don't need it anymore.
    }
  }
  InsertChunk(&free_by_start_, &free_by_size_, chunk);

  if (kCheckFreeMaps) {
    size_t free_after = CollectFree(free_by_start_, free_by_size_);

    if (free_after != free_before + size) {
      DumpFreeMap(free_by_size_);
      CHECK_EQ(free_after, free_before + size)
          << "Should be " << size << " difference from " << free_before;
    }
  }
}

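// A worked example of the coalescing above, with hypothetical addresses for
// illustration only: with free chunks {0x1000, 0x100} and {0x1200, 0x100},
// freeing {0x1100, 0x100} first merges with the left neighbor (whose End()
// is 0x1100) and then with the right one (the merged End() is 0x1200), so a
// single chunk {0x1000, 0x300} is reinserted. By contrast, freeing an
// overlapping range such as {0x1180, 0x100} would fail the
// CHECK_LE(chunk.End(), it->Start()) above.
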
}  // namespace art
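
// A hedged sketch of how a client might drive this class, using only the
// interface visible in this file (the fd/initial_size constructor, Alloc()
// and Free()). It is illustrative only, not compiled, and not how dex2oat
// actually wires up its swap space.
#if 0
#include <cstdlib>
#include <unistd.h>

void Example() {
  // Create and immediately unlink a scratch file, matching the constructor's
  // assumption; the kernel reclaims the storage once the fd is closed.
  char name[] = "/tmp/swap-XXXXXX";
  int fd = mkstemp(name);
  if (fd < 0) {
    return;
  }
  unlink(name);

  art::SwapSpace swap(fd, 16 * 1024 * 1024);  // 16 MiB to start with.
  void* block = swap.Alloc(4096);             // Best-fit from the free list.
  // ... use 'block' like ordinary memory; its pages are file-backed ...
  swap.Free(block, 4096);                     // May coalesce with free neighbors.
}  // ~SwapSpace closes the fd.
#endif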