// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

15#include "pw_ring_buffer/prefixed_entry_ring_buffer.h"
16
17#include <cstddef>
18#include <cstdint>
19
Wyatt Heplerf298de42021-03-19 15:06:36 -070020#include "pw_assert/check.h"
Keir Mierle866cff42020-04-28 22:24:44 -070021#include "pw_containers/vector.h"
22#include "pw_unit_test/framework.h"
23
24using std::byte;

namespace pw {
namespace ring_buffer {
namespace {
using Entry = PrefixedEntryRingBufferMulti::Entry;
using iterator = PrefixedEntryRingBufferMulti::iterator;

TEST(PrefixedEntryRingBuffer, NoBuffer) {
  PrefixedEntryRingBuffer ring(false);

  byte buf[32];
  size_t count;

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.SetBuffer(std::span<byte>(nullptr, 10u)),
            Status::InvalidArgument());
  EXPECT_EQ(ring.SetBuffer(std::span(buf, 0u)), Status::InvalidArgument());
  EXPECT_EQ(ring.FrontEntryDataSizeBytes(), 0u);

  EXPECT_EQ(ring.PushBack(buf), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(buf, &count), Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFrontWithPreamble(buf, &count),
            Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

// Single entry to write/read/pop over and over again.
constexpr byte single_entry_data[] = {byte(1),
                                      byte(2),
                                      byte(3),
                                      byte(4),
                                      byte(5),
                                      byte(6),
                                      byte(7),
                                      byte(8),
                                      byte(9)};
constexpr size_t single_entry_total_size = sizeof(single_entry_data) + 1;
constexpr size_t single_entry_test_buffer_size =
    (single_entry_total_size * 7) / 2;

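// Each entry in the buffer is laid out as:
//   [optional 1-byte user preamble][varint-encoded data size][data bytes]
// Worked example, assuming no user preamble: the 9-byte single_entry_data
// payload above is stored as the size byte 0x09 followed by the 9 data
// bytes, 10 bytes in total, which is the + 1 in single_entry_total_size.
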
// Make sure single_entry_total_size is even so single_entry_test_buffer_size
// gets the proper wrap/even behavior when getting to the end of the buffer.
static_assert((single_entry_total_size % 2) == 0u);
constexpr size_t kSingleEntryCycles = 300u;

// Repeatedly write the same data, then read and pop it, over and over again.
void SingleEntryWriteReadTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  byte read_buffer[single_entry_total_size];

  // Set read_size to an unexpected value to make sure result checks don't
  // luck out and happen to see a previous value.
  size_t read_size = 500U;
  uint32_t user_preamble = 0U;

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(
      ring.PushBack(std::span(single_entry_data, sizeof(test_buffer) + 5)),
      Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);
  read_size = 500U;
  EXPECT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
            Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    // Limit the preamble to a single byte, to ensure that the entry retains
    // a constant single_entry_total_size throughout the test.
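    // (Values below 128 varint-encode to a single byte holding the value
    // itself, e.g. varint(5) == {0x05}; 128 or more would take two bytes.)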
    uint32_t preamble_byte = i % 128;
    ASSERT_EQ(
        ring.PushBack(std::span(single_entry_data, data_size), preamble_byte),
        OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, data_size);

    // ASSERT_THAT(std::span(expect_buffer).last(data_size),
    //             testing::ElementsAreArray(std::span(read_buffer, data_size)));
    ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                     read_buffer,
                     data_size),
              0);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, single_entry_total_size);

    if (user_data) {
      expect_buffer[0] = byte(preamble_byte);
    }

    // ASSERT_THAT(std::span(expect_buffer),
    //             testing::ElementsAreArray(std::span(read_buffer)));
    ASSERT_EQ(memcmp(expect_buffer, read_buffer, single_entry_total_size), 0);

    if (user_data) {
      user_preamble = 0U;
      ASSERT_EQ(
          ring.PeekFrontWithPreamble(read_buffer, user_preamble, read_size),
          OkStatus());
      ASSERT_EQ(read_size, data_size);
      ASSERT_EQ(user_preamble, preamble_byte);
      ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                       read_buffer,
                       data_size),
                0);
    }

    ASSERT_EQ(ring.PopFront(), OkStatus());
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadNoUserData) {
  SingleEntryWriteReadTest(false);
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadYesUserData) {
  SingleEntryWriteReadTest(true);
}

// TODO(pwbug/196): Increase this to 5000 once we have a way to detect targets
// with more computation and memory oomph.
constexpr size_t kOuterCycles = 50u;
constexpr size_t kCountingUpMaxExpectedEntries =
    single_entry_test_buffer_size / single_entry_total_size;

// Write entries filled with a byte value that increments on each write. Write
// many times without reading or popping, then check that the ring buffer
// contains the correct entries.
template <bool kUserData>
void CountingUpWriteReadTest() {
  PrefixedEntryRingBuffer ring(kUserData);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);

  constexpr size_t kDataSize = sizeof(single_entry_data) - (kUserData ? 1 : 0);

  for (size_t i = 0; i < kOuterCycles; i++) {
    size_t seed = i;

    byte write_buffer[kDataSize];

    size_t j;
    for (j = 0; j < kSingleEntryCycles; j++) {
      memset(write_buffer, j + seed, sizeof(write_buffer));

      ASSERT_EQ(ring.PushBack(write_buffer), OkStatus());

      size_t expected_count = (j < kCountingUpMaxExpectedEntries)
                                  ? j + 1
                                  : kCountingUpMaxExpectedEntries;
      ASSERT_EQ(ring.EntryCount(), expected_count);
    }
    size_t final_write_j = j;
    size_t fill_val = seed + final_write_j - kCountingUpMaxExpectedEntries;

    for (j = 0; j < kCountingUpMaxExpectedEntries; j++) {
      byte read_buffer[sizeof(write_buffer)];
      size_t read_size;
      memset(write_buffer, fill_val + j, sizeof(write_buffer));
      ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());

      ASSERT_EQ(memcmp(write_buffer, read_buffer, kDataSize), 0);

      ASSERT_EQ(ring.PopFront(), OkStatus());
    }
  }
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadNoUserData) {
  CountingUpWriteReadTest<false>();
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadYesUserData) {
  CountingUpWriteReadTest<true>();
}

// Create statically to prevent allocating a capture in the lambda below.
static pw::Vector<byte, single_entry_total_size> read_buffer;

// Repeatedly write the same data, read it back through an output callback,
// and pop it, over and over again.
void SingleEntryWriteReadWithSectionWriterTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  auto output = [](std::span<const byte> src) -> Status {
    for (byte b : src) {
      read_buffer.push_back(b);
    }
    return OkStatus();
  };

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    // Limit the preamble to a single byte, to ensure that the entry retains
    // a constant single_entry_total_size throughout the test; single-byte
    // values varint-encode to the same value.
    uint32_t preamble_byte = i % 128;
    ASSERT_EQ(
        ring.PushBack(std::span(single_entry_data, data_size), preamble_byte),
        OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFront(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), data_size);

    ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                     read_buffer.data(),
                     data_size),
              0);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), single_entry_total_size);
    ASSERT_EQ(ring.PopFront(), OkStatus());

    if (user_data) {
      expect_buffer[0] = byte(preamble_byte);
    }

    ASSERT_EQ(
        memcmp(expect_buffer, read_buffer.data(), single_entry_total_size), 0);
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadWithSectionWriterNoUserData) {
  SingleEntryWriteReadWithSectionWriterTest(false);
}

TEST(PrefixedEntryRingBuffer,
     SingleEntryWriteReadWithSectionWriterYesUserData) {
  SingleEntryWriteReadWithSectionWriterTest(true);
}

constexpr size_t kEntrySizeBytes = 8u;
constexpr size_t kTotalEntryCount = 20u;
constexpr size_t kBufferExtraBytes = 5u;
constexpr size_t kTestBufferSize =
    (kEntrySizeBytes * kTotalEntryCount) + kBufferExtraBytes;
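// With a 1-byte size prefix, each 8-byte entry holds 7 bytes of data. The 5
// extra bytes make the buffer size a non-multiple of the entry size, which
// (presumably) forces entries to wrap across the end of the buffer.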

// Create statically to prevent allocating a capture in the lambda below.
static pw::Vector<byte, kTestBufferSize> actual_result;

void DeringTest(bool preload) {
  PrefixedEntryRingBuffer ring;

  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Entry data is entry size - preamble (single byte in this case).
  byte single_entry_buffer[kEntrySizeBytes - 1u];
  auto entry_data = std::span(single_entry_buffer);
  size_t i;

  // TODO(pwbug/196): Increase this to 500 once we have a way to detect targets
  // with more computation and memory oomph.
  size_t loop_goal = preload ? 50 : 1;

  for (size_t main_loop_count = 0; main_loop_count < loop_goal;
       main_loop_count++) {
    if (preload) {
      // Prime the ringbuffer with some junk data to get the buffer wrapped.
      for (i = 0; i < (kTotalEntryCount * (main_loop_count % 64u)); i++) {
        memset(single_entry_buffer, i, sizeof(single_entry_buffer));
        ring.PushBack(single_entry_buffer)
            .IgnoreError();  // TODO(pwbug/387): Handle Status properly
      }
    }

    // Build up the expected buffer and fill the ring buffer with the test
    // data.
    pw::Vector<byte, kTestBufferSize> expected_result;
    for (i = 0; i < kTotalEntryCount; i++) {
      // First component of the entry: the varint size.
      static_assert(sizeof(single_entry_buffer) < 127);
      expected_result.push_back(byte(sizeof(single_entry_buffer)));

      // Second component of the entry: the raw data.
      memset(single_entry_buffer, 'a' + i, sizeof(single_entry_buffer));
      for (byte b : entry_data) {
        expected_result.push_back(b);
      }

      // The ring buffer internally pushes the varint size byte.
      ring.PushBack(single_entry_buffer)
          .IgnoreError();  // TODO(pwbug/387): Handle Status properly
    }

    // Check values before doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

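    // Dering() rotates the underlying buffer so that the oldest entry starts
    // at offset zero; the memcmp against test_buffer below relies on entries
    // being contiguous from the start of the buffer.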
    ASSERT_EQ(ring.Dering(), OkStatus());

    // Check values after doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    // Read out the entries of the ring buffer.
    actual_result.clear();
    auto output = [](std::span<const byte> src) -> Status {
      for (byte b : src) {
        actual_result.push_back(b);
      }
      return OkStatus();
    };
    while (ring.EntryCount()) {
      ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
      ASSERT_EQ(ring.PopFront(), OkStatus());
    }

    // Ensure the actual result out of the ring buffer matches our manually
    // computed result.
    EXPECT_EQ(expected_result.size(), actual_result.size());
    ASSERT_EQ(memcmp(test_buffer, actual_result.data(), actual_result.size()),
              0);
    ASSERT_EQ(
        memcmp(
            expected_result.data(), actual_result.data(), actual_result.size()),
        0);
  }
}

TEST(PrefixedEntryRingBuffer, Dering) { DeringTest(true); }
TEST(PrefixedEntryRingBuffer, DeringNoPreload) { DeringTest(false); }

template <typename T>
Status PushBack(PrefixedEntryRingBufferMulti& ring,
                T element,
                uint32_t user_preamble = 0) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.PushBack(aliased.buffer, user_preamble);
}

template <typename T>
Status TryPushBack(PrefixedEntryRingBufferMulti& ring,
                   T element,
                   uint32_t user_preamble = 0) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.TryPushBack(aliased.buffer, user_preamble);
}

template <typename T>
T PeekFront(PrefixedEntryRingBufferMulti::Reader& reader,
            uint32_t* user_preamble_out = nullptr) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  size_t bytes_read = 0;
  uint32_t user_preamble = 0;
  PW_CHECK_OK(
      reader.PeekFrontWithPreamble(aliased.buffer, user_preamble, bytes_read));
  PW_CHECK_INT_EQ(bytes_read, sizeof(T));
  if (user_preamble_out) {
    *user_preamble_out = user_preamble;
  }
  return aliased.item;
}

template <typename T>
T GetEntry(std::span<const std::byte> lhs) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  std::memcpy(aliased.buffer.data(), lhs.data(), lhs.size_bytes());
  return aliased.item;
}
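
// The unions above rely on type punning to convert between trivially
// copyable values and raw bytes. A minimal memcpy-based sketch of the same
// round trip (AsBytes is a hypothetical helper for illustration, not part of
// the Pigweed API, and is not used by these tests):
template <typename T>
std::array<byte, sizeof(T)> AsBytes(const T& value) {
  std::array<byte, sizeof(T)> buffer;
  std::memcpy(buffer.data(), &value, sizeof(value));  // Well-defined copy.
  return buffer;
}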

void EmptyDataPushBackTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Push back an empty span and a non-empty span.
  EXPECT_EQ(ring.PushBack(std::span<std::byte>(), 1u), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 1u);
  EXPECT_EQ(ring.PushBack(single_entry_data, 2u), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 2u);

  // Confirm that both entries can be read back.
  byte entry_buffer[kTestBufferSize];
  uint32_t user_preamble = 0;
  size_t bytes_read = 0;
  // Read empty span.
  EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
            OkStatus());
  EXPECT_EQ(user_preamble, user_data ? 1u : 0u);
  EXPECT_EQ(bytes_read, 0u);
  EXPECT_EQ(ring.PopFront(), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 1u);
  // Read non-empty span.
  EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
            OkStatus());
  EXPECT_EQ(user_preamble, user_data ? 2u : 0u);
  ASSERT_EQ(bytes_read, sizeof(single_entry_data));
  EXPECT_EQ(memcmp(entry_buffer, single_entry_data, bytes_read), 0);
  EXPECT_EQ(ring.PopFront(), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestWithPreamble) {
  EmptyDataPushBackTest(true);
}
TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestNoPreamble) {
  EmptyDataPushBackTest(false);
}

TEST(PrefixedEntryRingBuffer, TryPushBack) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with a constant.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, 5);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(PeekFront<int>(ring), 5);

  // Should be unable to push more items.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::ResourceExhausted());
    EXPECT_EQ(PeekFront<int>(ring), 5);
  }

  // Fill the ring buffer again; unlike TryPushBack, PushBack overwrites the
  // oldest entries when the buffer is full.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PushBack<int>(ring, 100), OkStatus());
  }
  EXPECT_EQ(PeekFront<int>(ring), 100);
}

TEST(PrefixedEntryRingBuffer, Iterator) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with an incrementing count.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (Result<const Entry> entry_info : ring) {
    EXPECT_TRUE(entry_info.status().ok());
    EXPECT_EQ(GetEntry<size_t>(entry_info.value().buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, TryPushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());
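
  // Every attached reader observes every entry; the buffer only reclaims an
  // entry's bytes once the slowest reader has popped it, as the
  // TotalUsedBytes() checks below demonstrate.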

  // Fill up the ring buffer with an increasing count.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }

  // Run fast reader twice as fast as the slow reader.
  size_t total_used_bytes = ring.TotalUsedBytes();
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PeekFront<int>(fast_reader), i);
    EXPECT_EQ(fast_reader.PopFront(), OkStatus());
    EXPECT_EQ(ring.TotalUsedBytes(), total_used_bytes);
    if (i % 2 == 0) {
      EXPECT_EQ(PeekFront<int>(slow_reader), i / 2);
      EXPECT_EQ(slow_reader.PopFront(), OkStatus());
      EXPECT_LT(ring.TotalUsedBytes(), total_used_bytes);
    }
    total_used_bytes = ring.TotalUsedBytes();
  }
  EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
  EXPECT_GT(ring.TotalUsedBytes(), 0u);

  // Fill the buffer again; the fast reader should see only the newly pushed
  // entries, while the slow reader sees its backlog plus the new entries.
  size_t max_items = total_items;
  while (true) {
    Status status = TryPushBack<int>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(slow_reader.EntryCount(), max_items);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - max_items);

  for (int i = total_items - max_items; i < total_items; ++i) {
    EXPECT_EQ(PeekFront<int>(slow_reader), i);
    EXPECT_EQ(slow_reader.PopFront(), OkStatus());
    if (static_cast<size_t>(i) >= max_items) {
      EXPECT_EQ(PeekFront<int>(fast_reader), i);
      EXPECT_EQ(fast_reader.PopFront(), OkStatus());
    }
  }
  EXPECT_EQ(slow_reader.PopFront(), Status::OutOfRange());
  EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.TotalUsedBytes(), 0u);
}

TEST(PrefixedEntryRingBufferMulti, PushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());

  // Fill up the ring buffer with an increasing count.
  size_t total_items = 0;
  while (true) {
    Status status = TryPushBack<uint32_t>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(slow_reader.EntryCount(), total_items);

  // The following test:
  // - Moves the fast reader forward by one entry.
  // - Writes a single entry that is guaranteed to be larger than the size of
  //   a single entry in the buffer (uint64_t entry > uint32_t entry).
  // - Checks to see that both readers were moved forward.
  EXPECT_EQ(fast_reader.PopFront(), OkStatus());
  EXPECT_EQ(PushBack<uint64_t>(ring, 5u), OkStatus());
  // The readers have moved past values 0 and 1.
  EXPECT_EQ(PeekFront<uint32_t>(slow_reader), 2u);
  EXPECT_EQ(PeekFront<uint32_t>(fast_reader), 2u);
  // The readers have lost two entries, but gained an entry.
  EXPECT_EQ(slow_reader.EntryCount(), total_items - 1);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - 1);
}

TEST(PrefixedEntryRingBufferMulti, ReaderAddRemove) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  PrefixedEntryRingBufferMulti::Reader transient_reader;

  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill up the ring buffer with an incrementing count.
  size_t total_items = 0;
  while (true) {
    Status status = TryPushBack<size_t>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(reader.EntryCount(), total_items);

  // Add new reader after filling the buffer.
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);

  // Confirm that the transient reader observes all values, even though it was
  // attached after entries were pushed.
  for (size_t i = 0; i < total_items; i++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), i);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);

  // Confirm that re-attaching the reader resets it back to the oldest
  // available entry.
  EXPECT_EQ(ring.DetachReader(transient_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);

  for (size_t i = 0; i < total_items; i++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), i);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);
}

TEST(PrefixedEntryRingBufferMulti, SingleBufferPerReader) {
  PrefixedEntryRingBufferMulti ring_one;
  PrefixedEntryRingBufferMulti ring_two;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring_one.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring_one.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), Status::InvalidArgument());

  EXPECT_EQ(ring_one.DetachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_one.AttachReader(reader), Status::InvalidArgument());
}

TEST(PrefixedEntryRingBufferMulti, IteratorEmptyBuffer) {
  PrefixedEntryRingBufferMulti ring;
  // Pick a buffer that is too small to contain any valid entries.
  byte test_buffer[1] = {std::byte(0xFF)};

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.begin(), ring.end());
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // The buffer contains only valid entries. This is the state after the
  // buffer is populated and before any entries have been read.
  // E.g. [VALID|VALID|VALID|INVALID]

  // Fill up the ring buffer with an incrementing count.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidEntriesWithPreamble) {
  PrefixedEntryRingBufferMulti ring(true);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // The buffer contains only valid entries. This is the state after the
  // buffer is populated and before any entries have been read.
  // E.g. [VALID|VALID|VALID|INVALID]

  // Fill up the ring buffer with an incrementing count, which doubles as the
  // user preamble.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    EXPECT_EQ(entry_info.preamble, validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorStaleEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // The buffer contains only stale (already-read) but structurally valid
  // entries. This is the state after the buffer is populated and every entry
  // is read: the buffer retains the data but has an entry count of zero.
  // E.g. [STALE|STALE|STALE]
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Push and pop all the entries.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  while (reader.PopFront().ok()) {
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidStaleEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // The buffer contains both valid and stale entries. This is the state
  // after the buffer is populated and only some of the entries are read.
  // E.g. [VALID|INVALID|STALE|STALE]
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill the buffer with entries.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Pop roughly half the entries.
  while (reader.EntryCount() > (entry_count / 2)) {
    EXPECT_TRUE(reader.PopFront().ok());
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorBufferCorruption) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer contains partially written entries. This may happen if writing
  // is pre-empted (e.g. a crash occurs). In this state, we expect a series
  // of valid entries followed by an invalid entry.
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  // Add one entry to capture the size of a single entry.
  size_t entry_count = 0;
  EXPECT_TRUE(TryPushBack<size_t>(ring, entry_count++).ok());
  size_t entry_size = ring.TotalUsedBytes();

  // Fill the buffer with entries.
  while (TryPushBack<size_t>(ring, entry_count++).ok()) {
  }

  // Push another entry to move the write index forward and force the oldest
  // reader forward. This will require the iterator to dering.
  EXPECT_TRUE(PushBack<size_t>(ring, 0).ok());
  EXPECT_TRUE(ring.CheckForCorruption().ok());

  // The first entry is overwritten. Corrupt all data past the fifth entry.
  // Note that because the first entry has shifted, the entry_count recorded
  // in each entry is shifted by 1.
  constexpr size_t valid_entries = 5;
  size_t offset = valid_entries * entry_size;
  memset(test_buffer + offset, 0xFF, kTestBufferSize - offset);
  EXPECT_FALSE(ring.CheckForCorruption().ok());

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  iterator it = ring.begin();
  for (; it != ring.end(); it++) {
    EXPECT_EQ(GetEntry<size_t>(it->buffer), validated_entries + 1);
    validated_entries++;
  }
  // The final entry will fail to be read.
  EXPECT_EQ(it.status(), Status::DataLoss());
  EXPECT_EQ(validated_entries, valid_entries);
}

}  // namespace
}  // namespace ring_buffer
}  // namespace pw