blob: 1cafe977ee8b2f29b4b7993b08720157a8af6c44 [file] [log] [blame]
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -08001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <asm-generic/mman.h>
18#include <gtest/gtest.h>
19#include <atomic>
20#include <cstdlib>
21#include <sstream>
22#include <thread>
23#include <fmq/MessageQueue.h>
24#include <fmq/EventFlag.h>
25
/*
 * Bit masks used within the shared EventFlag word to signal queue state
 * between the reader and writer threads in the blocking tests.
 */
enum EventFlagBits : uint32_t {
    kFmqNotEmpty = 0x01,  // woken by the writer after data is written
    kFmqNotFull = 0x02,   // woken by the reader after data is consumed
};
30
31class SynchronizedReadWrites : public ::testing::Test {
32protected:
33 virtual void TearDown() {
34 delete mQueue;
35 }
36
37 virtual void SetUp() {
38 static constexpr size_t kNumElementsInQueue = 2048;
39 mQueue = new (std::nothrow) android::hardware::MessageQueue<uint8_t,
40 android::hardware::kSynchronizedReadWrite>(kNumElementsInQueue);
41 ASSERT_NE(nullptr, mQueue);
42 ASSERT_TRUE(mQueue->isValid());
43 mNumMessagesMax = mQueue->getQuantumCount();
44 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
45 }
46
47 android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>*
48 mQueue = nullptr;
49 size_t mNumMessagesMax = 0;
50};
51
52class UnsynchronizedWrite : public ::testing::Test {
53protected:
54 virtual void TearDown() {
55 delete mQueue;
56 }
57
58 virtual void SetUp() {
59 static constexpr size_t kNumElementsInQueue = 2048;
60 mQueue = new (std::nothrow) android::hardware::MessageQueue<uint8_t,
61 android::hardware::kUnsynchronizedWrite>(kNumElementsInQueue);
62 ASSERT_NE(nullptr, mQueue);
63 ASSERT_TRUE(mQueue->isValid());
64 mNumMessagesMax = mQueue->getQuantumCount();
65 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
66 }
67
68 android::hardware::MessageQueue<uint8_t,
69 android::hardware::kUnsynchronizedWrite>* mQueue = nullptr;
70 size_t mNumMessagesMax = 0;
71};
72
73class BlockingReadWrites : public ::testing::Test {
74protected:
75 virtual void TearDown() {
76 delete mQueue;
77 }
78 virtual void SetUp() {
79 static constexpr size_t kNumElementsInQueue = 2048;
80 mQueue = new (std::nothrow) android::hardware::MessageQueue<
81 uint8_t, android::hardware::kSynchronizedReadWrite>(kNumElementsInQueue);
82 ASSERT_NE(nullptr, mQueue);
83 ASSERT_TRUE(mQueue->isValid());
84 mNumMessagesMax = mQueue->getQuantumCount();
85 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -080086 /*
87 * Initialize the EventFlag word to indicate Queue is not full.
88 */
89 std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -080090 }
91
92 android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>* mQueue;
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -080093 std::atomic<uint32_t> mFw;
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -080094 size_t mNumMessagesMax = 0;
95};
96
97/*
98 * This thread will attempt to read and block. When wait returns
99 * it checks if the kFmqNotEmpty bit is actually set.
100 * If the read is succesful, it signals Wake to kFmqNotFull.
101 */
102void ReaderThreadBlocking(
103 android::hardware::MessageQueue<uint8_t,
104 android::hardware::kSynchronizedReadWrite>* fmq,
105 std::atomic<uint32_t>* fwAddr) {
106 const size_t dataLen = 64;
107 uint8_t data[dataLen];
108 android::hardware::EventFlag* efGroup = nullptr;
109 android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
110 ASSERT_EQ(android::NO_ERROR, status);
111 ASSERT_NE(nullptr, efGroup);
112
113 while (true) {
114 uint32_t efState = 0;
Hridya Valsaraju10f59dc2016-12-20 12:50:44 -0800115 android::status_t ret = efGroup->wait(kFmqNotEmpty,
116 &efState,
117 5000000000 /* timeoutNanoSeconds */);
118 /*
119 * Wait should not time out here after 5s
120 */
121 ASSERT_NE(android::TIMED_OUT, ret);
122
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800123 if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
124 efGroup->wake(kFmqNotFull);
125 break;
126 }
127 }
128
129 status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
130 ASSERT_EQ(android::NO_ERROR, status);
131}
132
133/*
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -0800134 * This thread will attempt to read and block using the readBlocking() API and
135 * passes in a pointer to an EventFlag object.
136 */
137void ReaderThreadBlocking2(
138 android::hardware::MessageQueue<uint8_t,
139 android::hardware::kSynchronizedReadWrite>* fmq,
140 std::atomic<uint32_t>* fwAddr) {
141 const size_t dataLen = 64;
142 uint8_t data[dataLen];
143 android::hardware::EventFlag* efGroup = nullptr;
144 android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
145 ASSERT_EQ(android::NO_ERROR, status);
146 ASSERT_NE(nullptr, efGroup);
147 bool ret = fmq->readBlocking(data,
148 dataLen,
149 static_cast<uint32_t>(kFmqNotFull),
150 static_cast<uint32_t>(kFmqNotEmpty),
151 5000000000 /* timeOutNanos */,
152 efGroup);
153 ASSERT_TRUE(ret);
154 status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
155 ASSERT_EQ(android::NO_ERROR, status);
156}
157
158/*
159 * Test that basic blocking works. This test uses the non-blocking read()/write()
160 * APIs.
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800161 */
162TEST_F(BlockingReadWrites, SmallInputTest1) {
163 const size_t dataLen = 64;
164 uint8_t data[dataLen] = {0};
165
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800166 android::hardware::EventFlag* efGroup = nullptr;
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -0800167 android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800168
169 ASSERT_EQ(android::NO_ERROR, status);
170 ASSERT_NE(nullptr, efGroup);
171
172 /*
173 * Start a thread that will try to read and block on kFmqNotEmpty.
174 */
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -0800175 std::thread Reader(ReaderThreadBlocking, mQueue, &mFw);
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800176 struct timespec waitTime = {0, 100 * 1000000};
177 ASSERT_EQ(0, nanosleep(&waitTime, NULL));
178
179 /*
180 * After waiting for some time write into the FMQ
181 * and call Wake on kFmqNotEmpty.
182 */
183 ASSERT_TRUE(mQueue->write(data, dataLen));
184 status = efGroup->wake(kFmqNotEmpty);
185 ASSERT_EQ(android::NO_ERROR, status);
186
187 ASSERT_EQ(0, nanosleep(&waitTime, NULL));
188 Reader.join();
189
190 status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
191 ASSERT_EQ(android::NO_ERROR, status);
192}
193
194/*
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -0800195 * Test that basic blocking works. This test uses the
196 * writeBlocking()/readBlocking() APIs.
197 */
198TEST_F(BlockingReadWrites, SmallInputTest2) {
199 const size_t dataLen = 64;
200 uint8_t data[dataLen] = {0};
201
202 android::hardware::EventFlag* efGroup = nullptr;
203 android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
204
205 ASSERT_EQ(android::NO_ERROR, status);
206 ASSERT_NE(nullptr, efGroup);
207
208 /*
209 * Start a thread that will try to read and block on kFmqNotEmpty. It will
210 * call wake() on kFmqNotFull when the read is successful.
211 */
212 std::thread Reader(ReaderThreadBlocking2, mQueue, &mFw);
213 bool ret = mQueue->writeBlocking(data,
214 dataLen,
215 static_cast<uint32_t>(kFmqNotFull),
216 static_cast<uint32_t>(kFmqNotEmpty),
217 5000000000 /* timeOutNanos */,
218 efGroup);
219 ASSERT_TRUE(ret);
220 Reader.join();
221
222 status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
223 ASSERT_EQ(android::NO_ERROR, status);
224}
225
226/*
Hridya Valsaraju10f59dc2016-12-20 12:50:44 -0800227 * Test that basic blocking times out as intended.
228 */
229TEST_F(BlockingReadWrites, BlockingTimeOutTest) {
230 android::hardware::EventFlag* efGroup = nullptr;
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -0800231 android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
Hridya Valsaraju10f59dc2016-12-20 12:50:44 -0800232
233 ASSERT_EQ(android::NO_ERROR, status);
234 ASSERT_NE(nullptr, efGroup);
235
236 /* Block on an EventFlag bit that no one will wake and time out in 1s */
237 uint32_t efState = 0;
238 android::status_t ret = efGroup->wait(kFmqNotEmpty,
239 &efState,
240 1000000000 /* timeoutNanoSeconds */);
241 /*
242 * Wait should time out in a second.
243 */
244 EXPECT_EQ(android::TIMED_OUT, ret);
245
246 status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
247 ASSERT_EQ(android::NO_ERROR, status);
248}
249
250/*
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800251 * Verify that a few bytes of data can be successfully written and read.
252 */
253TEST_F(SynchronizedReadWrites, SmallInputTest1) {
254 const size_t dataLen = 16;
255 ASSERT_LE(dataLen, mNumMessagesMax);
256 uint8_t data[dataLen];
257
258 for (size_t i = 0; i < dataLen; i++) {
259 data[i] = i & 0xFF;
260 }
261
262 ASSERT_TRUE(mQueue->write(data, dataLen));
263 uint8_t readData[dataLen] = {};
264 ASSERT_TRUE(mQueue->read(readData, dataLen));
265 ASSERT_EQ(0, memcmp(data, readData, dataLen));
266}
267
268/*
269 * Verify that read() returns false when trying to read from an empty queue.
270 */
271TEST_F(SynchronizedReadWrites, ReadWhenEmpty) {
272 ASSERT_EQ(0UL, mQueue->availableToRead());
273 const size_t dataLen = 2;
274 ASSERT_LE(dataLen, mNumMessagesMax);
275 uint8_t readData[dataLen];
276 ASSERT_FALSE(mQueue->read(readData, dataLen));
277}
278
279/*
280 * Write the queue until full. Verify that another write is unsuccessful.
281 * Verify that availableToWrite() returns 0 as expected.
282 */
283
284TEST_F(SynchronizedReadWrites, WriteWhenFull) {
285 ASSERT_EQ(0UL, mQueue->availableToRead());
286 std::vector<uint8_t> data(mNumMessagesMax);
287
288 for (size_t i = 0; i < mNumMessagesMax; i++) {
289 data[i] = i & 0xFF;
290 }
291
292 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
293 ASSERT_EQ(0UL, mQueue->availableToWrite());
294 ASSERT_FALSE(mQueue->write(&data[0], 1));
295
296 std::vector<uint8_t> readData(mNumMessagesMax);
297 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
298 ASSERT_EQ(data, readData);
299}
300
301/*
302 * Write a chunk of data equal to the queue size.
303 * Verify that the write is successful and the subsequent read
304 * returns the expected data.
305 */
306TEST_F(SynchronizedReadWrites, LargeInputTest1) {
307 std::vector<uint8_t> data(mNumMessagesMax);
308 for (size_t i = 0; i < mNumMessagesMax; i++) {
309 data[i] = i & 0xFF;
310 }
311
312 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
313 std::vector<uint8_t> readData(mNumMessagesMax);
314 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
315 ASSERT_EQ(data, readData);
316}
317
318/*
319 * Attempt to write a chunk of data larger than the queue size.
320 * Verify that it fails. Verify that a subsequent read fails and
321 * the queue is still empty.
322 */
323TEST_F(SynchronizedReadWrites, LargeInputTest2) {
324 ASSERT_EQ(0UL, mQueue->availableToRead());
325 const size_t dataLen = 4096;
326 ASSERT_GT(dataLen, mNumMessagesMax);
327 std::vector<uint8_t> data(dataLen);
328 for (size_t i = 0; i < dataLen; i++) {
329 data[i] = i & 0xFF;
330 }
331 ASSERT_FALSE(mQueue->write(&data[0], dataLen));
332 std::vector<uint8_t> readData(mNumMessagesMax);
333 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
334 ASSERT_NE(data, readData);
335 ASSERT_EQ(0UL, mQueue->availableToRead());
336}
337
338/*
339 * After the queue is full, try to write more data. Verify that
340 * the attempt returns false. Verify that the attempt did not
341 * affect the pre-existing data in the queue.
342 */
343TEST_F(SynchronizedReadWrites, LargeInputTest3) {
344 std::vector<uint8_t> data(mNumMessagesMax);
345 for (size_t i = 0; i < mNumMessagesMax; i++) {
346 data[i] = i & 0xFF;
347 }
348 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
349 ASSERT_FALSE(mQueue->write(&data[0], 1));
350 std::vector<uint8_t> readData(mNumMessagesMax);
351 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
352 ASSERT_EQ(data, readData);
353}
354
355/*
356 * Verify that multiple reads one after the other return expected data.
357 */
358TEST_F(SynchronizedReadWrites, MultipleRead) {
359 const size_t chunkSize = 100;
360 const size_t chunkNum = 5;
361 const size_t dataLen = chunkSize * chunkNum;
362 ASSERT_LE(dataLen, mNumMessagesMax);
363 uint8_t data[dataLen];
364 for (size_t i = 0; i < dataLen; i++) {
365 data[i] = i & 0xFF;
366 }
367 ASSERT_TRUE(mQueue->write(data, dataLen));
368 uint8_t readData[dataLen] = {};
369 for (size_t i = 0; i < chunkNum; i++) {
370 ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
371 }
372 ASSERT_EQ(0, memcmp(readData, data, dataLen));
373}
374
375/*
376 * Verify that multiple writes one after the other happens correctly.
377 */
378TEST_F(SynchronizedReadWrites, MultipleWrite) {
379 const int chunkSize = 100;
380 const int chunkNum = 5;
381 const size_t dataLen = chunkSize * chunkNum;
382 ASSERT_LE(dataLen, mNumMessagesMax);
383 uint8_t data[dataLen];
384 for (size_t i = 0; i < dataLen; i++) {
385 data[i] = i & 0xFF;
386 }
387 for (unsigned int i = 0; i < chunkNum; i++) {
388 ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
389 }
390 uint8_t readData[dataLen] = {};
391 ASSERT_TRUE(mQueue->read(readData, dataLen));
392 ASSERT_EQ(0, memcmp(readData, data, dataLen));
393}
394
395/*
396 * Write enough messages into the FMQ to fill half of it
397 * and read back the same.
398 * Write mNumMessagesMax messages into the queue. This will cause a
399 * wrap around. Read and verify the data.
400 */
401TEST_F(SynchronizedReadWrites, ReadWriteWrapAround) {
402 size_t numMessages = mNumMessagesMax / 2;
403 std::vector<uint8_t> data(mNumMessagesMax);
404 std::vector<uint8_t> readData(mNumMessagesMax);
405 for (size_t i = 0; i < mNumMessagesMax; i++) {
406 data[i] = i & 0xFF;
407 }
408 ASSERT_TRUE(mQueue->write(&data[0], numMessages));
409 ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
410 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
411 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
412 ASSERT_EQ(data, readData);
413}
414
415/*
416 * Verify that a few bytes of data can be successfully written and read.
417 */
418TEST_F(UnsynchronizedWrite, SmallInputTest1) {
419 const size_t dataLen = 16;
420 ASSERT_LE(dataLen, mNumMessagesMax);
421 uint8_t data[dataLen];
422 for (size_t i = 0; i < dataLen; i++) {
423 data[i] = i & 0xFF;
424 }
425 ASSERT_TRUE(mQueue->write(data, dataLen));
426 uint8_t readData[dataLen] = {};
427 ASSERT_TRUE(mQueue->read(readData, dataLen));
428 ASSERT_EQ(0, memcmp(data, readData, dataLen));
429}
430
431/*
432 * Verify that read() returns false when trying to read from an empty queue.
433 */
434TEST_F(UnsynchronizedWrite, ReadWhenEmpty) {
435 ASSERT_EQ(0UL, mQueue->availableToRead());
436 const size_t dataLen = 2;
437 ASSERT_TRUE(dataLen < mNumMessagesMax);
438 uint8_t readData[dataLen];
439 ASSERT_FALSE(mQueue->read(readData, dataLen));
440}
441
442/*
443 * Write the queue when full. Verify that a subsequent writes is succesful.
444 * Verify that availableToWrite() returns 0 as expected.
445 */
446
447TEST_F(UnsynchronizedWrite, WriteWhenFull) {
448 ASSERT_EQ(0UL, mQueue->availableToRead());
449 std::vector<uint8_t> data(mNumMessagesMax);
450 for (size_t i = 0; i < mNumMessagesMax; i++) {
451 data[i] = i & 0xFF;
452 }
453 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
454 ASSERT_EQ(0UL, mQueue->availableToWrite());
455 ASSERT_TRUE(mQueue->write(&data[0], 1));
456
457 std::vector<uint8_t> readData(mNumMessagesMax);
458 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
459}
460
461/*
462 * Write a chunk of data equal to the queue size.
463 * Verify that the write is successful and the subsequent read
464 * returns the expected data.
465 */
466TEST_F(UnsynchronizedWrite, LargeInputTest1) {
467 std::vector<uint8_t> data(mNumMessagesMax);
468 for (size_t i = 0; i < mNumMessagesMax; i++) {
469 data[i] = i & 0xFF;
470 }
471 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
472 std::vector<uint8_t> readData(mNumMessagesMax);
473 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
474 ASSERT_EQ(data, readData);
475}
476
477/*
478 * Attempt to write a chunk of data larger than the queue size.
479 * Verify that it fails. Verify that a subsequent read fails and
480 * the queue is still empty.
481 */
482TEST_F(UnsynchronizedWrite, LargeInputTest2) {
483 ASSERT_EQ(0UL, mQueue->availableToRead());
484 const size_t dataLen = 4096;
485 ASSERT_GT(dataLen, mNumMessagesMax);
486 std::vector<uint8_t> data(dataLen);
487 for (size_t i = 0; i < dataLen; i++) {
488 data[i] = i & 0xFF;
489 }
490 ASSERT_FALSE(mQueue->write(&data[0], dataLen));
491 std::vector<uint8_t> readData(mNumMessagesMax);
492 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
493 ASSERT_NE(data, readData);
494 ASSERT_EQ(0UL, mQueue->availableToRead());
495}
496
497/*
498 * After the queue is full, try to write more data. Verify that
499 * the attempt is succesful. Verify that the read fails
500 * as expected.
501 */
502TEST_F(UnsynchronizedWrite, LargeInputTest3) {
503 std::vector<uint8_t> data(mNumMessagesMax);
504 for (size_t i = 0; i < mNumMessagesMax; i++) {
505 data[i] = i & 0xFF;
506 }
507 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
508 ASSERT_TRUE(mQueue->write(&data[0], 1));
509 std::vector<uint8_t> readData(mNumMessagesMax);
510 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
511}
512
513/*
514 * Verify that multiple reads one after the other return expected data.
515 */
516TEST_F(UnsynchronizedWrite, MultipleRead) {
517 const size_t chunkSize = 100;
518 const size_t chunkNum = 5;
519 const size_t dataLen = chunkSize * chunkNum;
520 ASSERT_LE(dataLen, mNumMessagesMax);
521 uint8_t data[dataLen];
522 for (size_t i = 0; i < dataLen; i++) {
523 data[i] = i & 0xFF;
524 }
525 ASSERT_TRUE(mQueue->write(data, dataLen));
526 uint8_t readData[dataLen] = {};
527 for (size_t i = 0; i < chunkNum; i++) {
528 ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
529 }
530 ASSERT_EQ(0, memcmp(readData, data, dataLen));
531}
532
533/*
534 * Verify that multiple writes one after the other happens correctly.
535 */
536TEST_F(UnsynchronizedWrite, MultipleWrite) {
537 const size_t chunkSize = 100;
538 const size_t chunkNum = 5;
539 const size_t dataLen = chunkSize * chunkNum;
540 ASSERT_LE(dataLen, mNumMessagesMax);
541 uint8_t data[dataLen];
542 for (size_t i = 0; i < dataLen; i++) {
543 data[i] = i & 0xFF;
544 }
545 for (size_t i = 0; i < chunkNum; i++) {
546 ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
547 }
548 uint8_t readData[dataLen] = {};
549 ASSERT_TRUE(mQueue->read(readData, dataLen));
550 ASSERT_EQ(0, memcmp(readData, data, dataLen));
551}
552
553/*
554 * Write enough messages into the FMQ to fill half of it
555 * and read back the same.
556 * Write mNumMessagesMax messages into the queue. This will cause a
557 * wrap around. Read and verify the data.
558 */
559TEST_F(UnsynchronizedWrite, ReadWriteWrapAround) {
560 size_t numMessages = mNumMessagesMax / 2;
561 std::vector<uint8_t> data(mNumMessagesMax);
562 std::vector<uint8_t> readData(mNumMessagesMax);
563 for (size_t i = 0; i < mNumMessagesMax; i++) {
564 data[i] = i & 0xFF;
565 }
566 ASSERT_TRUE(mQueue->write(&data[0], numMessages));
567 ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
568 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
569 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
570 ASSERT_EQ(data, readData);
571}