// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#define PW_LOG_MODULE_NAME "KVS"
#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
#define PW_LOG_USE_ULTRA_SHORT_NAMES 1

#include "pw_kvs/internal/sectors.h"

#include <algorithm>  // For std::find, used by Contains() below.

#include "pw_kvs_private/config.h"
#include "pw_log/log.h"

namespace pw::kvs::internal {
namespace {

// Returns true if the container contains the value.
// TODO: At some point move this to pw_containers, along with adding tests.
template <typename Container, typename T>
bool Contains(const Container& container, const T& value) {
  return std::find(std::begin(container), std::end(container), value) !=
         std::end(container);
}
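
// Example usage, as in Sectors::Find() below:
//
//   if (Contains(std::span(temp_sectors_to_skip_, sectors_to_skip), sector)) {
//     continue;  // The sector is in the skip list.
//   }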

}  // namespace

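// Finds a sector with at least `size` bytes available, skipping the sectors
// that contain `addresses_to_skip` or `reserved_addresses`. On success, points
// *found_sector at the chosen sector and returns OK; otherwise returns
// Status::ResourceExhausted().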
Status Sectors::Find(FindMode find_mode,
                     SectorDescriptor** found_sector,
                     size_t size,
                     std::span<const Address> addresses_to_skip,
                     std::span<const Address> reserved_addresses) {
  SectorDescriptor* first_empty_sector = nullptr;
  bool at_least_two_empty_sectors = (find_mode == kGarbageCollect);

  // Used for the GC reclaimable bytes check.
  SectorDescriptor* non_empty_least_reclaimable_sector = nullptr;
  const size_t sector_size_bytes = partition_.sector_size_bytes();

  // Build a list of sectors to avoid.
  //
  // This is overly strict. reserved_addresses is populated when there are
  // sectors reserved for a new entry. It is safe to garbage collect into
  // these sectors, as long as there remains room for the pending entry. These
  // reserved sectors could also be garbage collected if they have recoverable
  // space. For simplicity, avoid both the relocating key's redundant entries
  // (addresses_to_skip) and the sectors reserved for pending writes
  // (reserved_addresses).
  // TODO(hepler): Look into improving garbage collection.
  size_t sectors_to_skip = 0;
  for (Address address : addresses_to_skip) {
    temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
  }
  for (Address address : reserved_addresses) {
    temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
  }

  DBG("Find sector with %u bytes available, starting with sector %u, %s",
      unsigned(size),
      Index(last_new_),
      (find_mode == kAppendEntry) ? "Append" : "GC");
  for (size_t i = 0; i < sectors_to_skip; ++i) {
    DBG(" Skip sector %u", Index(temp_sectors_to_skip_[i]));
  }

  // last_new_ is the sector that was last selected as the "new empty sector" to
  // write to. This last new sector is used as the starting point for the next
  // "find a new empty sector to write to" operation. Using the last new sector
  // as the start point cycles which empty sector is selected next, spreading
  // wear across all the empty sectors for a wear leveling benefit, rather than
  // putting more wear on the lower-numbered sectors.
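  //
  // For example, with four sectors and last_new_ pointing at sector 2, the
  // loop below visits sectors in the order 3, 0, 1, 2, so each search starts
  // just past the previously selected sector.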
  SectorDescriptor* sector = last_new_;

  // Look for a sector to use with enough space. The search uses a three-tier
  // priority process.
  //
  // Tier 1 is a sector that already has valid data. During GC, only select a
  // sector that has no reclaimable bytes. Immediately use the first matching
  // sector that is found.
  //
  // Tier 2 is sectors that are empty/erased. While scanning for a partial
  // sector, keep track of the first empty sector and whether a second empty
  // sector was seen. During GC, count the second empty sector as always seen.
  //
  // Tier 3, used only during garbage collection, is sectors with enough space
  // that are not empty but have recoverable bytes. Pick the sector with the
  // least recoverable bytes to minimize the likelihood of this sector needing
  // to be garbage collected soon.
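  //
  // For example, during GC a non-empty sector with enough space and zero
  // reclaimable bytes (tier 1) is used immediately; otherwise an empty sector
  // (tier 2) is preferred; otherwise the non-empty sector with the fewest
  // reclaimable bytes (tier 3) is used.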
  for (size_t j = 0; j < descriptors_.size(); j++) {
    sector += 1;
    if (sector == descriptors_.end()) {
      sector = descriptors_.begin();
    }

    // Skip sectors in the skip list.
    if (Contains(std::span(temp_sectors_to_skip_, sectors_to_skip), sector)) {
      continue;
    }

    if (!sector->Empty(sector_size_bytes) && sector->HasSpace(size)) {
      if ((find_mode == kAppendEntry) ||
          (sector->RecoverableBytes(sector_size_bytes) == 0)) {
        *found_sector = sector;
        return OkStatus();
      } else {
        // Track the non-empty sector with the fewest recoverable bytes, per
        // the tier 3 description above.
        if ((non_empty_least_reclaimable_sector == nullptr) ||
            (sector->RecoverableBytes(sector_size_bytes) <
             non_empty_least_reclaimable_sector->RecoverableBytes(
                 sector_size_bytes))) {
          non_empty_least_reclaimable_sector = sector;
        }
      }
    }

    if (sector->Empty(sector_size_bytes)) {
      if (first_empty_sector == nullptr) {
        first_empty_sector = sector;
      } else {
        at_least_two_empty_sectors = true;
      }
    }
  }

  // Tier 2 check: If the scan for a partial sector does not find a suitable
  // sector, use the first empty sector that was found. Normally it is required
  // to keep 1 empty sector after the sector found here, but that rule does not
  // apply during GC.
  if (first_empty_sector != nullptr && at_least_two_empty_sectors) {
    DBG(" Found a usable empty sector; returning the first found (%u)",
        Index(first_empty_sector));
    last_new_ = first_empty_sector;
    *found_sector = first_empty_sector;
    return OkStatus();
  }

  // Tier 3 check: If we got this far, use the sector with the least
  // recoverable bytes.
  if (non_empty_least_reclaimable_sector != nullptr) {
    *found_sector = non_empty_least_reclaimable_sector;
    DBG(" Found a usable sector %u, with %u B recoverable, in GC",
        Index(*found_sector),
        unsigned((*found_sector)->RecoverableBytes(sector_size_bytes)));
    return OkStatus();
  }

  // No sector was found.
  DBG(" Unable to find a usable sector");
  *found_sector = nullptr;
  return Status::ResourceExhausted();
}

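// Returns the idx-th sector in wear-leveled order, starting at the sector
// after last_new_ and wrapping around. For example, with four sectors and
// last_new_ at index 2, idx values 0 through 3 map to sectors 3, 0, 1, 2.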
SectorDescriptor& Sectors::WearLeveledSectorFromIndex(size_t idx) const {
  return descriptors_[(Index(last_new_) + 1 + idx) % descriptors_.size()];
}

// TODO: Consider breaking this function into smaller sub-chunks.
SectorDescriptor* Sectors::FindSectorToGarbageCollect(
    std::span<const Address> reserved_addresses) const {
  const size_t sector_size_bytes = partition_.sector_size_bytes();
  SectorDescriptor* sector_candidate = nullptr;
  size_t candidate_bytes = 0;

  // Build a list of sectors to avoid.
  for (size_t i = 0; i < reserved_addresses.size(); ++i) {
    temp_sectors_to_skip_[i] = &FromAddress(reserved_addresses[i]);
    DBG(" Skip sector %u", Index(reserved_addresses[i]));
  }
  const std::span sectors_to_skip(temp_sectors_to_skip_,
                                  reserved_addresses.size());

  // Step 1: Try to find a sector with stale keys and no valid keys (no
  // relocation needed). Use the first such sector found, as that will help the
  // KVS "rotate" around the partition. Initially this would select the sector
  // with the most reclaimable space, but that can cause GC sector selection to
  // "ping-pong" between two sectors when updating large keys.
  for (size_t i = 0; i < descriptors_.size(); ++i) {
    SectorDescriptor& sector = WearLeveledSectorFromIndex(i);
    if ((sector.valid_bytes() == 0) &&
        (sector.RecoverableBytes(sector_size_bytes) > 0) &&
        !Contains(sectors_to_skip, &sector)) {
      sector_candidate = &sector;
      break;
    }
  }

  // Step 2: If step 1 yields no sectors, just find the sector with the most
  // reclaimable bytes that is not among the sectors to avoid.
  if (sector_candidate == nullptr) {
    for (size_t i = 0; i < descriptors_.size(); ++i) {
      SectorDescriptor& sector = WearLeveledSectorFromIndex(i);
      if ((sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) &&
          !Contains(sectors_to_skip, &sector)) {
        sector_candidate = &sector;
        candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
      }
    }
  }

  // Step 3: If no sectors have reclaimable bytes, select the sector with the
  // most valid bytes. This at least allows entries of existing keys to get
  // spread to other sectors, including sectors that already have copies of the
  // current key being written.
  if (sector_candidate == nullptr) {
    for (size_t i = 0; i < descriptors_.size(); ++i) {
      SectorDescriptor& sector = WearLeveledSectorFromIndex(i);
      if ((sector.valid_bytes() > candidate_bytes) &&
          !Contains(sectors_to_skip, &sector)) {
        sector_candidate = &sector;
        candidate_bytes = sector.valid_bytes();
        DBG(" Doing GC on sector with no reclaimable bytes!");
      }
    }
  }

  if (sector_candidate != nullptr) {
    DBG("Found sector %u to Garbage Collect, %u recoverable bytes",
        Index(sector_candidate),
        unsigned(sector_candidate->RecoverableBytes(sector_size_bytes)));
  } else {
    DBG("Unable to find sector to garbage collect!");
  }
  return sector_candidate;
}

}  // namespace pw::kvs::internal