// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_kvs/key_value_store.h"

#include <algorithm>
#include <cstring>
#include <type_traits>

#define PW_LOG_USE_ULTRA_SHORT_NAMES 1
#include "pw_kvs_private/format.h"
#include "pw_kvs_private/macros.h"
#include "pw_log/log.h"

namespace pw::kvs {

using std::byte;
using std::string_view;

KeyValueStore::KeyValueStore(FlashPartition* partition,
                             const EntryHeaderFormat& format,
                             const Options& options)
    : partition_(*partition),
      entry_header_format_(format),
      options_(options),
      key_descriptor_list_{},
      key_descriptor_list_size_(0),
      sector_map_{},
      sector_map_size_(partition_.sector_count()),
      last_new_sector_(sector_map_.data()),
      working_buffer_{} {}

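// Initialization is a two-pass scan over the partition: the first pass walks
// every sector and loads each entry's key descriptor into RAM, and the second
// pass recomputes the number of valid bytes in each sector from those
// descriptors. Nothing is written to flash during Init().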
Status KeyValueStore::Init() {
  if (kMaxUsableSectors < sector_map_size_) {
    CRT("KeyValueStore::kMaxUsableSectors must be at least as large as the "
        "number of sectors in the flash partition");
    return Status::FAILED_PRECONDITION;
  }

  if (kMaxUsableSectors > sector_map_size_) {
    DBG("KeyValueStore::kMaxUsableSectors is %zu sectors larger than needed",
        kMaxUsableSectors - sector_map_size_);
  }

  // Reset the number of occupied key descriptors; we will fill them later.
  key_descriptor_list_size_ = 0;

  // TODO: init last_new_sector_ to a random sector. Since the on-flash stored
  // information does not allow recovering the previous last_new_sector_ after
  // clean start, random is a good second choice.

  const size_t sector_size_bytes = partition_.sector_size_bytes();

  if (working_buffer_.size() < sector_size_bytes) {
    CRT("ERROR: working_buffer_ (%zu bytes) is smaller than sector "
        "size (%zu bytes)",
        working_buffer_.size(),
        sector_size_bytes);
    return Status::INVALID_ARGUMENT;
  }

  DBG("First pass: Read all entries from all sectors");
  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
    // Track writable bytes in this sector. Updated after reading each entry.
    sector_map_[sector_id].tail_free_bytes = sector_size_bytes;

    const Address sector_address = sector_id * sector_size_bytes;
    Address entry_address = sector_address;

    for (int num_entries_in_sector = 0;; num_entries_in_sector++) {
      DBG("Load entry: sector=%zu, entry#=%d, address=%zu",
          sector_id,
          num_entries_in_sector,
          size_t(entry_address));

      if (!AddressInSector(sector_map_[sector_id], entry_address)) {
        DBG("Fell off end of sector; moving to the next sector");
        break;
      }

      Address next_entry_address;
      Status status = LoadEntry(entry_address, &next_entry_address);
      if (status == Status::NOT_FOUND) {
        DBG("Hit un-written data in sector; moving to the next sector");
        break;
      }
      if (status == Status::DATA_LOSS) {
        // It's not clear that the KVS can make a unilateral decision about
        // what to do in corruption cases. It's an application decision, for
        // which we should offer some configurability. For now, bail out of
        // loading entirely and give up.
        //
        // Later, scan the rest of the sector for valid keys; it's entirely
        // possible that a duplicate of the key exists elsewhere and everything
        // is fine. Eventually, we could wipe and maybe recover the sector.
        //
        // TODO: Implement rest-of-sector scanning for valid entries.
        return Status::DATA_LOSS;
      }
      TRY(status);

      // Entry loaded successfully, so get ready to load the next one.
      entry_address = next_entry_address;

      // Update the number of writable bytes in this sector.
      sector_map_[sector_id].tail_free_bytes =
          sector_size_bytes - (entry_address - sector_address);
    }
  }

  DBG("Second pass: Count valid bytes in each sector");
  // Initialize the sector sizes.
  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
    sector_map_[sector_id].valid_bytes = 0;
  }
  // For every valid key, increment the valid bytes for that sector.
  for (size_t key_id = 0; key_id < key_descriptor_list_size_; ++key_id) {
    uint32_t sector_id =
        key_descriptor_list_[key_id].address / sector_size_bytes;
    EntryHeader header;
    TRY(ReadEntryHeader(key_descriptor_list_[key_id].address, &header));
    sector_map_[sector_id].valid_bytes += header.size();
  }
  initialized_ = true;
  return Status::OK;
}

Status KeyValueStore::LoadEntry(Address entry_address,
                                Address* next_entry_address) {
  const size_t alignment_bytes = partition_.alignment_bytes();

  EntryHeader header;
  TRY(ReadEntryHeader(entry_address, &header));
  // TODO: Should likely add a "LogHeader" method or similar.
  DBG("Header: ");
  DBG("  Address      = 0x%zx", size_t(entry_address));
  DBG("  Magic        = 0x%zx", size_t(header.magic()));
  DBG("  Checksum     = 0x%zx", size_t(header.checksum()));
  DBG("  Key length   = 0x%zx", size_t(header.key_length()));
  DBG("  Value length = 0x%zx", size_t(header.value_length()));
  DBG("  Entry size   = 0x%zx", size_t(header.size()));
  DBG("  Padded size  = 0x%zx",
      size_t(AlignUp(header.size(), alignment_bytes)));

  if (HeaderLooksLikeUnwrittenData(header)) {
    return Status::NOT_FOUND;
  }

  // TODO: Handle multiple magics for formats that have changed.
  if (header.magic() != entry_header_format_.magic) {
    // TODO: It may be cleaner to have some logging helpers for these cases.
    CRT("Found corrupt magic: %zx; expecting %zx; at address %zx",
        size_t(header.magic()),
        size_t(entry_header_format_.magic),
        size_t(entry_address));
    return Status::DATA_LOSS;
  }

  // Read the key from flash & validate the entry (which reads the value).
  KeyBuffer key_buffer;
  TRY(ReadEntryKey(entry_address, header.key_length(), key_buffer.data()));
  const string_view key(key_buffer.data(), header.key_length());

  TRY(header.VerifyChecksumInFlash(
      &partition_, entry_address, entry_header_format_.checksum));

  KeyDescriptor key_descriptor(
      key,
      header.key_version(),
      entry_address,
      header.deleted() ? KeyDescriptor::kDeleted : KeyDescriptor::kValid);

  DBG("Key hash: %zx (%zu)",
      size_t(key_descriptor.key_hash),
      size_t(key_descriptor.key_hash));

  TRY(AppendNewOrOverwriteStaleExistingDescriptor(key_descriptor));

  // TODO: Extract this to something like "NextValidEntryAddress".
  *next_entry_address =
      AlignUp(key_descriptor.address + header.size(), alignment_bytes);

  return Status::OK;
}

// TODO: This method is what causes the O(valid_entries * all_entries) time
// complexity for reading. At some cost in memory, this could be optimized by
// using a hash table instead of scanning, but in practice it should be fine
// for a small number of keys.
Status KeyValueStore::AppendNewOrOverwriteStaleExistingDescriptor(
    const KeyDescriptor& key_descriptor) {
  // With the new key descriptor, either add it to the descriptor table or
  // overwrite an existing entry with an older version of the key.
  KeyDescriptor* existing_descriptor = FindDescriptor(key_descriptor.key_hash);
  if (existing_descriptor) {
    if (existing_descriptor->key_version < key_descriptor.key_version) {
      // Existing entry is old; replace the existing entry with the new one.
      *existing_descriptor = key_descriptor;
    } else {
      // Otherwise, check for data integrity and leave the existing entry.
      if (existing_descriptor->key_version == key_descriptor.key_version) {
        ERR("Data loss: Duplicated old(=%zu) and new(=%zu) version",
            size_t(existing_descriptor->key_version),
            size_t(key_descriptor.key_version));
        return Status::DATA_LOSS;
      }
      DBG("Found stale entry when appending; ignoring");
    }
    return Status::OK;
  }
  // Write new entry.
  KeyDescriptor* newly_allocated_key_descriptor;
  TRY(AppendEmptyDescriptor(&newly_allocated_key_descriptor));
  *newly_allocated_key_descriptor = key_descriptor;
  return Status::OK;
}

// TODO: Need a better name.
Status KeyValueStore::AppendEmptyDescriptor(KeyDescriptor** new_descriptor) {
  if (KeyListFull()) {
    // TODO: Is this the right return code?
    return Status::RESOURCE_EXHAUSTED;
  }
  *new_descriptor = &key_descriptor_list_[key_descriptor_list_size_++];
  return Status::OK;
}

// TODO: Finish.
bool KeyValueStore::HeaderLooksLikeUnwrittenData(
    const EntryHeader& header) const {
  // TODO: This is not correct; it should call through to flash memory.
  return header.magic() == 0xffffffff;
}

KeyValueStore::KeyDescriptor* KeyValueStore::FindDescriptor(uint32_t hash) {
  for (size_t key_id = 0; key_id < key_descriptor_list_size_; key_id++) {
    if (key_descriptor_list_[key_id].key_hash == hash) {
      return &(key_descriptor_list_[key_id]);
    }
  }
  return nullptr;
}

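// Reads follow the in-RAM descriptor: the key is hashed and matched against
// the descriptor list, the entry header is re-read from flash at the
// descriptor's address, and the value is copied into the caller's buffer
// (optionally verifying the checksum when verify_on_read is set).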
StatusWithSize KeyValueStore::Get(string_view key,
                                  span<byte> value_buffer) const {
  TRY(CheckOperation(key));

  const KeyDescriptor* key_descriptor;
  TRY(FindKeyDescriptor(key, &key_descriptor));

  if (key_descriptor->deleted()) {
    return Status::NOT_FOUND;
  }

  EntryHeader header;
  TRY(ReadEntryHeader(key_descriptor->address, &header));

  StatusWithSize result = ReadEntryValue(*key_descriptor, header, value_buffer);
  if (result.ok() && options_.verify_on_read) {
    return header.VerifyChecksum(entry_header_format_.checksum,
                                 key,
                                 value_buffer.subspan(0, result.size()));
  }
  return result;
}

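// Writes never modify an entry in place: a Put() appends a new copy of the
// entry with a higher key version to a sector with space, and the descriptor
// is updated to point at the new copy. Older copies become stale until their
// sector is garbage collected.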
Status KeyValueStore::Put(string_view key, span<const byte> value) {
  DBG("Writing key/value; key length=%zu, value length=%zu",
      key.size(),
      value.size());

  TRY(CheckOperation(key));

  if (value.size() > (1 << 24)) {
    // TODO: Reject sizes that are larger than the maximum?
  }

  KeyDescriptor* key_descriptor;
  if (FindKeyDescriptor(key, &key_descriptor).ok()) {
    DBG("Writing over existing entry");
    return WriteEntryForExistingKey(key_descriptor, key, value);
  }

  DBG("Writing new entry");
  return WriteEntryForNewKey(key, value);
}

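// Deleting a key does not erase anything in place. Instead, a tombstone entry
// for the key (a new entry with an empty value) is appended, and the in-RAM
// descriptor is marked kDeleted. The stale copies are reclaimed later when
// their sector is garbage collected.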
Status KeyValueStore::Delete(string_view key) {
  TRY(CheckOperation(key));

  KeyDescriptor* key_descriptor;
  TRY(FindKeyDescriptor(key, &key_descriptor));

  if (key_descriptor->deleted()) {
    return Status::NOT_FOUND;
  }

  key_descriptor->state = KeyDescriptor::kDeleted;

  SectorDescriptor* sector;
  TRY(FindOrRecoverSectorWithSpace(&sector, EntryHeader::size(key, {})));

  DBG("Writing tombstone; found sector: %zu", SectorIndex(sector));
  return AppendEntry(sector, key_descriptor, key, {});
}

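// Iteration operates on the in-RAM key descriptor list. Tombstoned (deleted)
// descriptors are skipped, so only live keys are visible; the key text itself
// is read back from flash when the iterator is dereferenced.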
KeyValueStore::iterator& KeyValueStore::iterator::operator++() {
  // Skip to the next entry that is valid (not deleted).
  while (++index_ < item_.kvs_.key_descriptor_list_size_ &&
         descriptor().deleted()) {
  }
  return *this;
}

const KeyValueStore::Item& KeyValueStore::iterator::operator*() {
  std::memset(item_.key_buffer_.data(), 0, item_.key_buffer_.size());

  EntryHeader header;
  if (item_.kvs_.ReadEntryHeader(descriptor().address, &header).ok()) {
    item_.kvs_.ReadEntryKey(
        descriptor().address, header.key_length(), item_.key_buffer_.data());
  }

  return item_;
}

KeyValueStore::iterator KeyValueStore::begin() const {
  size_t i = 0;
  // Skip over any deleted entries at the start of the descriptor list.
  while (i < key_descriptor_list_size_ && key_descriptor_list_[i].deleted()) {
    i += 1;
  }
  return iterator(*this, i);
}

// TODO(hepler): The valid entry count could be tracked in the KVS to avoid the
// need for this for-loop.
size_t KeyValueStore::size() const {
  size_t valid_entries = 0;

  for (size_t i = 0; i < key_descriptor_list_size_; ++i) {
    if (!key_descriptor_list_[i].deleted()) {
      valid_entries += 1;
    }
  }

  return valid_entries;
}

StatusWithSize KeyValueStore::ValueSize(std::string_view key) const {
  TRY(CheckOperation(key));

  const KeyDescriptor* key_descriptor;
  TRY(FindKeyDescriptor(key, &key_descriptor));

  if (key_descriptor->deleted()) {
    return Status::NOT_FOUND;
  }

  EntryHeader header;
  TRY(ReadEntryHeader(key_descriptor->address, &header));

  return StatusWithSize(header.value_length());
}

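// Polynomial hash of the key: hash = sum over i of key[i] * 65599^(i + 1).
// Different keys can collide, so FindKeyDescriptor() confirms a hash match by
// reading the stored key back from flash and comparing the full string.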
uint32_t KeyValueStore::HashKey(string_view string) {
  uint32_t hash = 0;
  uint32_t coefficient = 65599u;

  for (char ch : string) {
    hash += coefficient * unsigned(ch);
    coefficient *= 65599u;
  }

  return hash;
}

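// Helper behind the size-checked Get() overloads (assumed to be declared in
// the header): the read is rejected unless the stored value is exactly
// size_bytes long, which prevents silently truncated or overflowing copies
// into fixed-size objects.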
Status KeyValueStore::FixedSizeGet(std::string_view key,
                                   byte* value,
                                   size_t size_bytes) const {
  // Ensure that the size of the stored value matches the size of the type.
  // Otherwise, report an error. This check avoids potential memory corruption.
  StatusWithSize result = ValueSize(key);
  if (!result.ok()) {
    return result.status();
  }
  if (result.size() != size_bytes) {
    DBG("Requested %zu B read, but value is %zu B", size_bytes, result.size());
    return Status::INVALID_ARGUMENT;
  }
  return Get(key, span(value, size_bytes)).status();
}

Status KeyValueStore::CheckOperation(string_view key) const {
  if (InvalidKey(key)) {
    return Status::INVALID_ARGUMENT;
  }
  if (!initialized_) {
    return Status::FAILED_PRECONDITION;
  }
  return Status::OK;
}

Status KeyValueStore::FindKeyDescriptor(string_view key,
                                        const KeyDescriptor** result) const {
  char key_buffer[kMaxKeyLength];
  const uint32_t hash = HashKey(key);

  for (auto& descriptor : key_descriptors()) {
    if (descriptor.key_hash == hash) {
      DBG("Found match! For hash: %zx", size_t(hash));
      TRY(ReadEntryKey(descriptor.address, key.size(), key_buffer));

      if (key == string_view(key_buffer, key.size())) {
        DBG("Keys matched too");
        *result = &descriptor;
        return Status::OK;
      }
    }
  }
  return Status::NOT_FOUND;
}

Status KeyValueStore::ReadEntryHeader(Address address,
                                      EntryHeader* header) const {
  return partition_.Read(address, sizeof(*header), header).status();
}

Status KeyValueStore::ReadEntryKey(Address address,
                                   size_t key_length,
                                   char* key) const {
  // TODO: This check probably shouldn't be here; this is like checking that
  // the Cortex M's RAM isn't corrupt. This should be done at boot time.
  // However, this argument sometimes comes from EntryHeader::key_value_len,
  // which is read directly from flash. If it's corrupted, we shouldn't try to
  // read a bunch of extra data.
  if (key_length == 0u || key_length > kMaxKeyLength) {
    return Status::DATA_LOSS;
  }
  // The key is immediately after the entry header.
  return partition_.Read(address + sizeof(EntryHeader), key_length, key)
      .status();
}

StatusWithSize KeyValueStore::ReadEntryValue(
    const KeyDescriptor& key_descriptor,
    const EntryHeader& header,
    span<byte> value) const {
  const size_t read_size = std::min(header.value_length(), value.size());
  StatusWithSize result = partition_.Read(
      key_descriptor.address + sizeof(header) + header.key_length(),
      value.subspan(0, read_size));
  TRY(result);
  if (read_size != header.value_length()) {
    return StatusWithSize(Status::RESOURCE_EXHAUSTED, read_size);
  }
  return StatusWithSize(read_size);
}

Status KeyValueStore::WriteEntryForExistingKey(KeyDescriptor* key_descriptor,
                                               string_view key,
                                               span<const byte> value) {
  key_descriptor->state = KeyDescriptor::kValid;

  SectorDescriptor* sector;
  TRY(FindOrRecoverSectorWithSpace(&sector, EntryHeader::size(key, value)));
  DBG("Writing existing entry; found sector: %zu", SectorIndex(sector));
  return AppendEntry(sector, key_descriptor, key, value);
}

Status KeyValueStore::WriteEntryForNewKey(string_view key,
                                          span<const byte> value) {
  if (KeyListFull()) {
    WRN("KVS full: trying to store a new entry, but can't. Have %zu entries",
        key_descriptor_list_size_);
    return Status::RESOURCE_EXHAUSTED;
  }

  // Modify the key descriptor at the end of the array without bumping the list
  // size, so the key descriptor is prepared and written without being
  // committed first.
  KeyDescriptor& key_descriptor =
      key_descriptor_list_[key_descriptor_list_size_];
  key_descriptor.key_hash = HashKey(key);
  key_descriptor.key_version = 0;  // will be incremented by AppendEntry()
  key_descriptor.state = KeyDescriptor::kValid;

  SectorDescriptor* sector;
  TRY(FindOrRecoverSectorWithSpace(&sector, EntryHeader::size(key, value)));
  DBG("Writing new entry; found sector: %zu", SectorIndex(sector));
  TRY(AppendEntry(sector, &key_descriptor, key, value));

  // Only bump our size when we are certain the write succeeded.
  key_descriptor_list_size_ += 1;
  return Status::OK;
}

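// Relocation copies a live entry out of a sector that is about to be garbage
// collected: the key and value are staged in working_buffer_, the checksum is
// re-verified, and the entry is appended to a different sector with a bumped
// key version. The old copy simply becomes stale and is reclaimed when its
// sector is erased.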
Status KeyValueStore::RelocateEntry(KeyDescriptor& key_descriptor) {
  struct TempEntry {
    std::array<char, kMaxKeyLength + 1> key;
    std::array<char, sizeof(working_buffer_) - sizeof(key)> value;
  };
  TempEntry* entry = reinterpret_cast<TempEntry*>(working_buffer_.data());

  // Read the entry to be relocated. Store the header in a local variable and
  // store the key and value in the TempEntry in the statically allocated
  // working_buffer_.
  EntryHeader header;
  TRY(ReadEntryHeader(key_descriptor.address, &header));
  TRY(ReadEntryKey(
      key_descriptor.address, header.key_length(), entry->key.data()));
  string_view key = string_view(entry->key.data(), header.key_length());
  StatusWithSize result = ReadEntryValue(
      key_descriptor, header, as_writable_bytes(span(entry->value)));
  if (!result.status().ok()) {
    return Status::INTERNAL;
  }

  auto value = span(entry->value.data(), result.size());

  TRY(header.VerifyChecksum(
      entry_header_format_.checksum, key, as_bytes(value)));

  SectorDescriptor* old_sector = SectorFromAddress(key_descriptor.address);
  if (old_sector == nullptr) {
    return Status::INTERNAL;
  }

  // Find a new sector for the entry and write it to the new location.
  SectorDescriptor* new_sector;
  TRY(FindSectorWithSpace(&new_sector, header.size(), old_sector, true));
  return AppendEntry(new_sector, &key_descriptor, key, as_bytes(value));
}

// Find either an existing sector with enough space that is not the sector to
// skip, or an empty sector. Maintains the invariant that there is always at
// least 1 empty sector unless set to bypass the rule.
Status KeyValueStore::FindSectorWithSpace(SectorDescriptor** found_sector,
                                          size_t size,
                                          SectorDescriptor* sector_to_skip,
                                          bool bypass_empty_sector_rule) {
  // last_new_sector_ is the sector that was most recently selected as the "new
  // empty sector" to write to. It is used as the starting point for the next
  // "find a new empty sector to write to" operation. Starting from the last
  // new sector cycles which empty sector is selected next, spreading wear
  // across all the empty sectors for a wear-leveling benefit, rather than
  // putting more wear on the lower-numbered sectors.
  //
  // Locally, use the sector index for ease of iterating through the sectors.
  // For persistent storage, use SectorDescriptor* rather than the sector index
  // because SectorDescriptor* is the standard way to identify a sector.
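  //
  // For example, with four sectors and last_new_sector_ at index 2, the scan
  // below visits sectors 3, 0, and 1 (in that order) and stops before
  // re-examining sector 2.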
  size_t last_new_sector_index_ = SectorIndex(last_new_sector_);
  size_t start = (last_new_sector_index_ + 1) % sector_map_size_;
  SectorDescriptor* first_empty_sector = nullptr;
  bool at_least_two_empty_sectors = bypass_empty_sector_rule;

  // Look for a partial sector to use with enough space. Immediately use the
  // first one of those that is found. While scanning for a partial sector,
  // keep track of the first empty sector and if a second sector was seen.
  for (size_t i = start; i != last_new_sector_index_;
       i = (i + 1) % sector_map_size_) {
    DBG("Examining sector %zu", i);
    SectorDescriptor& sector = sector_map_[i];

    if (sector_to_skip == &sector) {
      DBG("Skipping the skip sector");
      continue;
    }

    if (!SectorEmpty(sector) && sector.HasSpace(size)) {
      DBG("Partially occupied sector with enough space; done!");
      *found_sector = &sector;
      return Status::OK;
    }

    if (SectorEmpty(sector)) {
      if (first_empty_sector == nullptr) {
        first_empty_sector = &sector;
      } else {
        at_least_two_empty_sectors = true;
      }
    }
  }

  // If the scan for a partial sector does not find a suitable sector, use the
  // first empty sector that was found. Normally it is required to keep 1 empty
  // sector after the sector found here, but that rule can be bypassed in
  // special circumstances (such as during garbage collection).
  if (at_least_two_empty_sectors) {
    DBG("Found a usable empty sector; returning the first found (%zu)",
        SectorIndex(first_empty_sector));
    last_new_sector_ = first_empty_sector;
    *found_sector = first_empty_sector;
    return Status::OK;
  }

  // No sector was found.
  *found_sector = nullptr;
  return Status::RESOURCE_EXHAUSTED;
}

Status KeyValueStore::FindOrRecoverSectorWithSpace(SectorDescriptor** sector,
                                                   size_t size) {
  Status result = FindSectorWithSpace(sector, size);
  if (result.ok()) {
    return result;
  }
  if (options_.partial_gc_on_write) {
    return GarbageCollectOneSector(sector);
  }
  return result;
}

KeyValueStore::SectorDescriptor* KeyValueStore::FindSectorToGarbageCollect() {
  SectorDescriptor* sector_candidate = nullptr;
  size_t candidate_bytes = 0;

  // Step 1: Try to find a sector with stale keys and no valid keys (no
  // relocation needed). If any such sectors are found, use the sector with the
  // most reclaimable bytes.
  for (auto& sector : sectors()) {
    if ((sector.valid_bytes == 0) &&
        (RecoverableBytes(sector) > candidate_bytes)) {
      sector_candidate = &sector;
      candidate_bytes = RecoverableBytes(sector);
    }
  }

  // Step 2: If step 1 yields no sectors, just find the sector with the most
  // reclaimable bytes.
  if (sector_candidate == nullptr) {
    for (auto& sector : sectors()) {
      if (RecoverableBytes(sector) > candidate_bytes) {
        sector_candidate = &sector;
        candidate_bytes = RecoverableBytes(sector);
      }
    }
  }

  return sector_candidate;
}

Status KeyValueStore::GarbageCollectOneSector(SectorDescriptor** sector) {
  // Step 1: Find the sector to garbage collect
  SectorDescriptor* sector_to_gc = FindSectorToGarbageCollect();

  if (sector_to_gc == nullptr) {
    return Status::RESOURCE_EXHAUSTED;
  }

  // Step 2: Move any valid entries in the GC sector to other sectors
  if (sector_to_gc->valid_bytes != 0) {
    for (auto& descriptor : key_descriptors()) {
      if (AddressInSector(*sector_to_gc, descriptor.address)) {
        TRY(RelocateEntry(descriptor));
      }
    }
  }

  if (sector_to_gc->valid_bytes != 0) {
    return Status::INTERNAL;
  }

  // Step 3: Reinitialize the sector
  sector_to_gc->tail_free_bytes = 0;
  TRY(partition_.Erase(SectorBaseAddress(sector_to_gc), 1));
  sector_to_gc->tail_free_bytes = partition_.sector_size_bytes();

  *sector = sector_to_gc;
  return Status::OK;
}

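// On-flash entry layout, as written below and as assumed by ReadEntryKey() and
// ReadEntryValue(): [EntryHeader][key bytes][value bytes], with the next entry
// starting at the entry size rounded up to the partition alignment.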
Status KeyValueStore::AppendEntry(SectorDescriptor* sector,
                                  KeyDescriptor* key_descriptor,
                                  const string_view key,
                                  span<const byte> value) {
  // Write the header, key, and value to flash.
  EntryHeader header;

  if (key_descriptor->deleted()) {
    header = EntryHeader::Tombstone(entry_header_format_.magic,
                                    entry_header_format_.checksum,
                                    key,
                                    key_descriptor->key_version + 1);
  } else {
    header = EntryHeader::Valid(entry_header_format_.magic,
                                entry_header_format_.checksum,
                                key,
                                value,
                                key_descriptor->key_version + 1);
  }

  DBG("Appending entry with key version: %zx", size_t(header.key_version()));

  // Handles writing multiple concatenated buffers, while breaking up the
  // writes into alignment-sized blocks.
  Address address = NextWritableAddress(sector);
  DBG("Appending to address: %zx", size_t(address));
  TRY_ASSIGN(
      size_t written,
      partition_.Write(
          address, {as_bytes(span(&header, 1)), as_bytes(span(key)), value}));

  if (options_.verify_on_write) {
    TRY(header.VerifyChecksumInFlash(
        &partition_, address, entry_header_format_.checksum));
  }

  key_descriptor->address = address;
  key_descriptor->key_version = header.key_version();
  sector->valid_bytes += written;
  sector->tail_free_bytes -= written;
  return Status::OK;
}

void KeyValueStore::LogDebugInfo() {
  const size_t sector_size_bytes = partition_.sector_size_bytes();
  DBG("====================== KEY VALUE STORE DUMP =========================");
  DBG(" ");
  DBG("Flash partition:");
  DBG("  Sector count     = %zu", partition_.sector_count());
  DBG("  Sector max count = %zu", kMaxUsableSectors);
  DBG("  Sectors in use   = %zu", sector_map_size_);
  DBG("  Sector size      = %zu", sector_size_bytes);
  DBG("  Total size       = %zu", partition_.size_bytes());
  DBG("  Alignment        = %zu", partition_.alignment_bytes());
  DBG(" ");
  DBG("Key descriptors:");
  DBG("  Entry count     = %zu", key_descriptor_list_size_);
  DBG("  Max entry count = %zu", kMaxEntries);
  DBG(" ");
  DBG("      #     hash      version    address   address (hex)");
  for (size_t i = 0; i < key_descriptor_list_size_; ++i) {
    const KeyDescriptor& kd = key_descriptor_list_[i];
    DBG("   |%3zu: | %8zx  |%8zu  | %8zu | %8zx",
        i,
        size_t(kd.key_hash),
        size_t(kd.key_version),
        size_t(kd.address),
        size_t(kd.address));
  }
  DBG(" ");

  DBG("Sector descriptors:");
  DBG("      #     tail free  valid    has_space");
  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
    const SectorDescriptor& sd = sector_map_[sector_id];
    DBG("   |%3zu: | %8zu  |%8zu  | %s",
        sector_id,
        size_t(sd.tail_free_bytes),
        size_t(sd.valid_bytes),
        sd.tail_free_bytes ? "YES" : "");
  }
  DBG(" ");

  // TODO: This should stop logging after some threshold.
  // size_t dumped_bytes = 0;
  DBG("Sector raw data:");
  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
    // Read sector data. Yes, this will blow the stack on embedded.
    std::array<byte, 500> raw_sector_data;  // TODO
    StatusWithSize sws =
        partition_.Read(sector_id * sector_size_bytes, raw_sector_data);
    DBG("Read: %zu bytes", sws.size());

    DBG("  base    addr  offs   0  1  2  3  4  5  6  7");
    for (size_t i = 0; i < sector_size_bytes; i += 8) {
      DBG("  %3zu %8zx %5zu | %02x %02x %02x %02x %02x %02x %02x %02x",
          sector_id,
          (sector_id * sector_size_bytes) + i,
          i,
          static_cast<unsigned int>(raw_sector_data[i + 0]),
          static_cast<unsigned int>(raw_sector_data[i + 1]),
          static_cast<unsigned int>(raw_sector_data[i + 2]),
          static_cast<unsigned int>(raw_sector_data[i + 3]),
          static_cast<unsigned int>(raw_sector_data[i + 4]),
          static_cast<unsigned int>(raw_sector_data[i + 5]),
          static_cast<unsigned int>(raw_sector_data[i + 6]),
          static_cast<unsigned int>(raw_sector_data[i + 7]));

      // TODO: Fix exit condition.
      if (i > 128) {
        break;
      }
    }
    DBG(" ");
  }

  DBG("////////////////////// KEY VALUE STORE DUMP END /////////////////////");
}

}  // namespace pw::kvs