Wyatt Hepler | c84393f | 2020-03-20 11:23:24 -0700 | [diff] [blame] | 1 | // Copyright 2020 The Pigweed Authors |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not |
| 4 | // use this file except in compliance with the License. You may obtain a copy of |
| 5 | // the License at |
| 6 | // |
| 7 | // https://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
| 11 | // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
| 12 | // License for the specific language governing permissions and limitations under |
| 13 | // the License. |
| 14 | |
Wyatt Hepler | ae222dc | 2020-10-14 10:46:27 -0700 | [diff] [blame] | 15 | #define PW_LOG_MODULE_NAME "KVS" |
| 16 | #define PW_LOG_LEVEL PW_KVS_LOG_LEVEL |
Keir Mierle | d28068e | 2020-08-14 16:41:44 -0700 | [diff] [blame] | 17 | #define PW_LOG_USE_ULTRA_SHORT_NAMES 1 |
| 18 | |
Wyatt Hepler | c84393f | 2020-03-20 11:23:24 -0700 | [diff] [blame] | 19 | #include "pw_kvs/internal/sectors.h" |
| 20 | |
Wyatt Hepler | ae222dc | 2020-10-14 10:46:27 -0700 | [diff] [blame] | 21 | #include "pw_kvs_private/config.h" |
Wyatt Hepler | c84393f | 2020-03-20 11:23:24 -0700 | [diff] [blame] | 22 | #include "pw_log/log.h" |
| 23 | |
| 24 | namespace pw::kvs::internal { |
| 25 | namespace { |
| 26 | |
// Returns true if `container` contains an element equal to `value`.
// TODO: At some point move this to pw_containers, along with adding tests.
template <typename Container, typename T>
bool Contains(const Container& container, const T& value) {
  return std::any_of(std::begin(container),
                     std::end(container),
                     [&value](const auto& item) { return item == value; });
}
| 34 | |
| 35 | } // namespace |
| 36 | |
// Finds a sector with at least `size` bytes available, storing it in
// *found_sector. `find_mode` selects the policy: kAppendEntry requires that a
// spare empty sector remain after the pick; kGarbageCollect relaxes that rule
// and also allows falling back to a non-empty sector with recoverable bytes.
// Sectors whose addresses appear in `addresses_to_skip` or
// `reserved_addresses` are never selected. Returns OK on success, or
// RESOURCE_EXHAUSTED (with *found_sector set to nullptr) if no sector fits.
Status Sectors::Find(FindMode find_mode,
                     SectorDescriptor** found_sector,
                     size_t size,
                     std::span<const Address> addresses_to_skip,
                     std::span<const Address> reserved_addresses) {
  SectorDescriptor* first_empty_sector = nullptr;
  // During GC the "keep a spare empty sector" rule is waived, so treat the
  // two-empty-sector requirement as already satisfied.
  bool at_least_two_empty_sectors = (find_mode == kGarbageCollect);

  // Used for the GC reclaimable bytes check
  SectorDescriptor* non_empty_least_reclaimable_sector = nullptr;
  const size_t sector_size_bytes = partition_.sector_size_bytes();

  // Build a list of sectors to avoid.
  //
  // This is overly strict. reserved_addresses is populated when there are
  // sectors reserved for a new entry. It is safe to garbage collect into
  // these sectors, as long as there remains room for the pending entry. These
  // reserved sectors could also be garbage collected if they have recoverable
  // space. For simplicity, avoid both the relocating key's redundant entries
  // (addresses_to_skip) and the sectors reserved for pending writes
  // (reserved_addresses).
  // TODO(hepler): Look into improving garbage collection.
  size_t sectors_to_skip = 0;
  for (Address address : addresses_to_skip) {
    temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
  }
  for (Address address : reserved_addresses) {
    temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
  }

  DBG("Find sector with %u bytes available, starting with sector %u, %s",
      unsigned(size),
      Index(last_new_),
      (find_mode == kAppendEntry) ? "Append" : "GC");
  for (size_t i = 0; i < sectors_to_skip; ++i) {
    DBG("  Skip sector %u", Index(temp_sectors_to_skip_[i]));
  }

  // last_new_ is the sector that was last selected as the "new empty sector" to
  // write to. This last new sector is used as the starting point for the next
  // "find a new empty sector to write to" operation. By using the last new
  // sector as the start point we will cycle which empty sector is selected
  // next, spreading the wear across all the empty sectors and get a wear
  // leveling benefit, rather than putting more wear on the lower number
  // sectors.
  SectorDescriptor* sector = last_new_;

  // Look for a sector to use with enough space. The search uses a 3 priority
  // tier process.
  //
  // Tier 1 is sector that already has valid data. During GC only select a
  // sector that has no reclaimable bytes. Immediately use the first matching
  // sector that is found.
  //
  // Tier 2 is find sectors that are empty/erased. While scanning for a partial
  // sector, keep track of the first empty sector and if a second empty sector
  // was seen. If during GC then count the second empty sector as always seen.
  //
  // Tier 3 is during garbage collection, find sectors with enough space that
  // are not empty but have recoverable bytes. Pick the sector with the least
  // recoverable bytes to minimize the likelyhood of this sector needing to be
  // garbage collected soon.
  for (size_t j = 0; j < descriptors_.size(); j++) {
    // Advance before use so the scan starts at the sector after last_new_;
    // wrap around to the first descriptor at the end of the array.
    sector += 1;
    if (sector == descriptors_.end()) {
      sector = descriptors_.begin();
    }

    // Skip sectors in the skip list.
    if (Contains(std::span(temp_sectors_to_skip_, sectors_to_skip), sector)) {
      continue;
    }

    // Tier 1: a partially-written sector with room for the entry.
    if (!sector->Empty(sector_size_bytes) && sector->HasSpace(size)) {
      if ((find_mode == kAppendEntry) ||
          (sector->RecoverableBytes(sector_size_bytes) == 0)) {
        *found_sector = sector;
        return OkStatus();
      } else {
        // Tier 3 bookkeeping: remember a fallback candidate for GC mode.
        // NOTE(review): despite the variable's name and the Tier 3 comment
        // above ("least recoverable bytes"), this comparison replaces the
        // candidate whenever the new sector has MORE recoverable bytes, so it
        // tracks the sector with the most recoverable bytes. Confirm whether
        // the comparison direction or the name/comment is the intended
        // behavior before changing either.
        if ((non_empty_least_reclaimable_sector == nullptr) ||
            (non_empty_least_reclaimable_sector->RecoverableBytes(
                 sector_size_bytes) <
             sector->RecoverableBytes(sector_size_bytes))) {
          non_empty_least_reclaimable_sector = sector;
        }
      }
    }

    // Tier 2 bookkeeping: track the first empty sector seen and whether a
    // second one exists (required for kAppendEntry to keep a spare).
    if (sector->Empty(sector_size_bytes)) {
      if (first_empty_sector == nullptr) {
        first_empty_sector = sector;
      } else {
        at_least_two_empty_sectors = true;
      }
    }
  }

  // Tier 2 check: If the scan for a partial sector does not find a suitable
  // sector, use the first empty sector that was found. Normally it is required
  // to keep 1 empty sector after the sector found here, but that rule does not
  // apply during GC.
  if (first_empty_sector != nullptr && at_least_two_empty_sectors) {
    DBG("  Found a usable empty sector; returning the first found (%u)",
        Index(first_empty_sector));
    last_new_ = first_empty_sector;
    *found_sector = first_empty_sector;
    return OkStatus();
  }

  // Tier 3 check: If we got this far, use the sector with least recoverable
  // bytes
  if (non_empty_least_reclaimable_sector != nullptr) {
    *found_sector = non_empty_least_reclaimable_sector;
    DBG("  Found a usable sector %u, with %u B recoverable, in GC",
        Index(*found_sector),
        unsigned((*found_sector)->RecoverableBytes(sector_size_bytes)));
    return OkStatus();
  }

  // No sector was found.
  DBG("  Unable to find a usable sector");
  *found_sector = nullptr;
  return Status::ResourceExhausted();
}
| 161 | |
Armando Montanez | f9e93e1 | 2020-04-22 07:44:14 -0700 | [diff] [blame] | 162 | SectorDescriptor& Sectors::WearLeveledSectorFromIndex(size_t idx) const { |
| 163 | return descriptors_[(Index(last_new_) + 1 + idx) % descriptors_.size()]; |
| 164 | } |
| 165 | |
// TODO: Consider breaking this function into smaller sub-chunks.
// Selects the sector to garbage collect next, or returns nullptr if no sector
// qualifies. Sectors whose addresses appear in `reserved_addresses` are never
// selected. Candidates are scanned in wear-leveled order (rotating from the
// sector after last_new_) through three fallback steps of decreasing
// preference.
SectorDescriptor* Sectors::FindSectorToGarbageCollect(
    std::span<const Address> reserved_addresses) const {
  const size_t sector_size_bytes = partition_.sector_size_bytes();
  SectorDescriptor* sector_candidate = nullptr;
  size_t candidate_bytes = 0;

  // Build a vector of sectors to avoid.
  for (size_t i = 0; i < reserved_addresses.size(); ++i) {
    temp_sectors_to_skip_[i] = &FromAddress(reserved_addresses[i]);
    DBG("  Skip sector %u", Index(reserved_addresses[i]));
  }
  const std::span sectors_to_skip(temp_sectors_to_skip_,
                                  reserved_addresses.size());

  // Step 1: Try to find a sector with stale keys and no valid keys (no
  // relocation needed). Use the first such sector found, as that will help the
  // KVS "rotate" around the partition. Initially this would select the sector
  // with the most reclaimable space, but that can cause GC sector selection to
  // "ping-pong" between two sectors when updating large keys.
  for (size_t i = 0; i < descriptors_.size(); ++i) {
    SectorDescriptor& sector = WearLeveledSectorFromIndex(i);
    if ((sector.valid_bytes() == 0) &&
        (sector.RecoverableBytes(sector_size_bytes) > 0) &&
        !Contains(sectors_to_skip, &sector)) {
      sector_candidate = &sector;
      break;
    }
  }

  // Step 2: If step 1 yields no sectors, just find the sector with the most
  // reclaimable bytes but no addresses to avoid.
  if (sector_candidate == nullptr) {
    for (size_t i = 0; i < descriptors_.size(); ++i) {
      SectorDescriptor& sector = WearLeveledSectorFromIndex(i);
      if ((sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) &&
          !Contains(sectors_to_skip, &sector)) {
        sector_candidate = &sector;
        candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
      }
    }
  }

  // Step 3: If no sectors with reclaimable bytes, select the sector with the
  // most free bytes. This at least will allow entries of existing keys to get
  // spread to other sectors, including sectors that already have copies of the
  // current key being written.
  // (candidate_bytes is still 0 here, since step 2 only runs when it finds no
  // candidate and therefore never updates it.)
  if (sector_candidate == nullptr) {
    for (size_t i = 0; i < descriptors_.size(); ++i) {
      SectorDescriptor& sector = WearLeveledSectorFromIndex(i);
      if ((sector.valid_bytes() > candidate_bytes) &&
          !Contains(sectors_to_skip, &sector)) {
        sector_candidate = &sector;
        candidate_bytes = sector.valid_bytes();
        DBG("  Doing GC on sector with no reclaimable bytes!");
      }
    }
  }

  if (sector_candidate != nullptr) {
    DBG("Found sector %u to Garbage Collect, %u recoverable bytes",
        Index(sector_candidate),
        unsigned(sector_candidate->RecoverableBytes(sector_size_bytes)));
  } else {
    DBG("Unable to find sector to garbage collect!");
  }
  return sector_candidate;
}
| 234 | |
| 235 | } // namespace pw::kvs::internal |