blob: 3748cfee8de729ee45df4921745e036ce7454328 [file] [log] [blame]
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include <asm-generic/mman.h>
18#include <gtest/gtest.h>
19#include <atomic>
20#include <cstdlib>
21#include <sstream>
22#include <thread>
23#include <fmq/MessageQueue.h>
24#include <fmq/EventFlag.h>
25
/*
 * Bits used in the shared EventFlag word to signal queue state between
 * the reader and writer sides of the FMQ.
 */
enum EventFlagBits : uint32_t {
    kFmqNotEmpty = 1 << 0,  // set by a writer: data is available to read
    kFmqNotFull = 1 << 1,   // set by a reader: space is available to write
};
30
31class SynchronizedReadWrites : public ::testing::Test {
32protected:
33 virtual void TearDown() {
34 delete mQueue;
35 }
36
37 virtual void SetUp() {
38 static constexpr size_t kNumElementsInQueue = 2048;
39 mQueue = new (std::nothrow) android::hardware::MessageQueue<uint8_t,
40 android::hardware::kSynchronizedReadWrite>(kNumElementsInQueue);
41 ASSERT_NE(nullptr, mQueue);
42 ASSERT_TRUE(mQueue->isValid());
43 mNumMessagesMax = mQueue->getQuantumCount();
44 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
45 }
46
47 android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>*
48 mQueue = nullptr;
49 size_t mNumMessagesMax = 0;
50};
51
52class UnsynchronizedWrite : public ::testing::Test {
53protected:
54 virtual void TearDown() {
55 delete mQueue;
56 }
57
58 virtual void SetUp() {
59 static constexpr size_t kNumElementsInQueue = 2048;
60 mQueue = new (std::nothrow) android::hardware::MessageQueue<uint8_t,
61 android::hardware::kUnsynchronizedWrite>(kNumElementsInQueue);
62 ASSERT_NE(nullptr, mQueue);
63 ASSERT_TRUE(mQueue->isValid());
64 mNumMessagesMax = mQueue->getQuantumCount();
65 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
66 }
67
68 android::hardware::MessageQueue<uint8_t,
69 android::hardware::kUnsynchronizedWrite>* mQueue = nullptr;
70 size_t mNumMessagesMax = 0;
71};
72
73class BlockingReadWrites : public ::testing::Test {
74protected:
75 virtual void TearDown() {
76 delete mQueue;
77 }
78 virtual void SetUp() {
79 static constexpr size_t kNumElementsInQueue = 2048;
80 mQueue = new (std::nothrow) android::hardware::MessageQueue<
81 uint8_t, android::hardware::kSynchronizedReadWrite>(kNumElementsInQueue);
82 ASSERT_NE(nullptr, mQueue);
83 ASSERT_TRUE(mQueue->isValid());
84 mNumMessagesMax = mQueue->getQuantumCount();
85 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -080086 /*
87 * Initialize the EventFlag word to indicate Queue is not full.
88 */
89 std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -080090 }
91
92 android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>* mQueue;
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -080093 std::atomic<uint32_t> mFw;
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -080094 size_t mNumMessagesMax = 0;
95};
96
Hridya Valsaraju2fb3a0c2017-01-10 14:31:43 -080097class QueueSizeOdd : public ::testing::Test {
98 protected:
99 virtual void TearDown() {
100 delete mQueue;
101 }
102 virtual void SetUp() {
103 static constexpr size_t kNumElementsInQueue = 2049;
104 mQueue = new (std::nothrow) android::hardware::MessageQueue<
105 uint8_t, android::hardware::kSynchronizedReadWrite>(kNumElementsInQueue,
106 true /* configureEventFlagWord */);
107 ASSERT_NE(nullptr, mQueue);
108 ASSERT_TRUE(mQueue->isValid());
109 mNumMessagesMax = mQueue->getQuantumCount();
110 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
111 auto evFlagWordPtr = mQueue->getEventFlagWord();
112 ASSERT_NE(nullptr, evFlagWordPtr);
113 /*
114 * Initialize the EventFlag word to indicate Queue is not full.
115 */
116 std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
117 }
118
119 android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>* mQueue;
120 size_t mNumMessagesMax = 0;
121};
122
/*
 * This thread will attempt to read and block. When wait returns
 * it checks if the kFmqNotEmpty bit is actually set (wait can return
 * spuriously or with other bits set). If the read is successful, it
 * signals kFmqNotFull via wake().
 *
 * NOTE(review): gtest ASSERT_* macros in a non-main thread abort only
 * this function, not the test — presumably acceptable here since the
 * main test thread join()s and would then hang/fail; confirm intended.
 */
void ReaderThreadBlocking(
        android::hardware::MessageQueue<uint8_t,
        android::hardware::kSynchronizedReadWrite>* fmq,
        std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    while (true) {
        uint32_t efState = 0;
        android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                              &efState,
                                              5000000000 /* timeoutNanoSeconds */);
        /*
         * Wait should not time out here after 5s: the writer is
         * expected to wake us well before that.
         */
        ASSERT_NE(android::TIMED_OUT, ret);

        // Re-check the bit and retry the read: the loop tolerates both
        // spurious wakeups and a race where the data is not yet visible.
        if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
            efGroup->wake(kFmqNotFull);
            break;
        }
    }

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
158
159/*
Hridya Valsarajuf0ffb832016-12-28 08:46:42 -0800160 * This thread will attempt to read and block using the readBlocking() API and
161 * passes in a pointer to an EventFlag object.
162 */
163void ReaderThreadBlocking2(
164 android::hardware::MessageQueue<uint8_t,
165 android::hardware::kSynchronizedReadWrite>* fmq,
166 std::atomic<uint32_t>* fwAddr) {
167 const size_t dataLen = 64;
168 uint8_t data[dataLen];
169 android::hardware::EventFlag* efGroup = nullptr;
170 android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
171 ASSERT_EQ(android::NO_ERROR, status);
172 ASSERT_NE(nullptr, efGroup);
173 bool ret = fmq->readBlocking(data,
174 dataLen,
175 static_cast<uint32_t>(kFmqNotFull),
176 static_cast<uint32_t>(kFmqNotEmpty),
177 5000000000 /* timeOutNanos */,
178 efGroup);
179 ASSERT_TRUE(ret);
180 status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
181 ASSERT_EQ(android::NO_ERROR, status);
182}
183
/*
 * Test that basic blocking works. This test uses the non-blocking read()/write()
 * APIs plus explicit EventFlag wait/wake.
 */
TEST_F(BlockingReadWrites, SmallInputTest1) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty.
     */
    std::thread Reader(ReaderThreadBlocking, mQueue, &mFw);
    // 100 ms pause so the reader has a chance to reach its wait()
    // before we write; the reader loop tolerates either ordering.
    struct timespec waitTime = {0, 100 * 1000000};
    ASSERT_EQ(0, nanosleep(&waitTime, NULL));

    /*
     * After waiting for some time write into the FMQ
     * and call Wake on kFmqNotEmpty.
     */
    ASSERT_TRUE(mQueue->write(data, dataLen));
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    // Second 100 ms pause before joining the reader thread.
    ASSERT_EQ(0, nanosleep(&waitTime, NULL));
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
219
/*
 * Test that basic blocking works. This test uses the
 * writeBlocking()/readBlocking() APIs.
 */
TEST_F(BlockingReadWrites, SmallInputTest2) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty. It will
     * call wake() on kFmqNotFull when the read is successful.
     */
    std::thread Reader(ReaderThreadBlocking2, mQueue, &mFw);
    // Blocking write: waits (up to 5s) for kFmqNotFull before writing,
    // then wakes kFmqNotEmpty, pairing with the reader's readBlocking().
    bool ret = mQueue->writeBlocking(data,
                                     dataLen,
                                     static_cast<uint32_t>(kFmqNotFull),
                                     static_cast<uint32_t>(kFmqNotEmpty),
                                     5000000000 /* timeOutNanos */,
                                     efGroup);
    ASSERT_TRUE(ret);
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
251
/*
 * Test that basic blocking times out as intended.
 */
TEST_F(BlockingReadWrites, BlockingTimeOutTest) {
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /* Block on an EventFlag bit that no one will wake and time out in 1s */
    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                          &efState,
                                          1000000000 /* timeoutNanoSeconds */);
    /*
     * Wait should time out in a second. EXPECT (not ASSERT) so the
     * deleteEventFlag() cleanup below still runs if this check fails.
     */
    EXPECT_EQ(android::TIMED_OUT, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
275
276/*
Hridya Valsaraju2fb3a0c2017-01-10 14:31:43 -0800277 * Test that odd queue sizes do not cause unaligned error
278 * on access to EventFlag object.
279 */
280TEST_F(QueueSizeOdd, EventFlagTest) {
281 const size_t dataLen = 64;
282 uint8_t data[dataLen] = {0};
283
284 bool ret = mQueue->writeBlocking(data,
285 dataLen,
286 static_cast<uint32_t>(kFmqNotFull),
287 static_cast<uint32_t>(kFmqNotEmpty),
288 5000000000 /* timeOutNanos */);
289 ASSERT_TRUE(ret);
290}
291
292/*
Hridya Valsaraju8b0d5a52016-12-16 10:29:03 -0800293 * Verify that a few bytes of data can be successfully written and read.
294 */
295TEST_F(SynchronizedReadWrites, SmallInputTest1) {
296 const size_t dataLen = 16;
297 ASSERT_LE(dataLen, mNumMessagesMax);
298 uint8_t data[dataLen];
299
300 for (size_t i = 0; i < dataLen; i++) {
301 data[i] = i & 0xFF;
302 }
303
304 ASSERT_TRUE(mQueue->write(data, dataLen));
305 uint8_t readData[dataLen] = {};
306 ASSERT_TRUE(mQueue->read(readData, dataLen));
307 ASSERT_EQ(0, memcmp(data, readData, dataLen));
308}
309
310/*
311 * Verify that read() returns false when trying to read from an empty queue.
312 */
313TEST_F(SynchronizedReadWrites, ReadWhenEmpty) {
314 ASSERT_EQ(0UL, mQueue->availableToRead());
315 const size_t dataLen = 2;
316 ASSERT_LE(dataLen, mNumMessagesMax);
317 uint8_t readData[dataLen];
318 ASSERT_FALSE(mQueue->read(readData, dataLen));
319}
320
321/*
322 * Write the queue until full. Verify that another write is unsuccessful.
323 * Verify that availableToWrite() returns 0 as expected.
324 */
325
326TEST_F(SynchronizedReadWrites, WriteWhenFull) {
327 ASSERT_EQ(0UL, mQueue->availableToRead());
328 std::vector<uint8_t> data(mNumMessagesMax);
329
330 for (size_t i = 0; i < mNumMessagesMax; i++) {
331 data[i] = i & 0xFF;
332 }
333
334 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
335 ASSERT_EQ(0UL, mQueue->availableToWrite());
336 ASSERT_FALSE(mQueue->write(&data[0], 1));
337
338 std::vector<uint8_t> readData(mNumMessagesMax);
339 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
340 ASSERT_EQ(data, readData);
341}
342
343/*
344 * Write a chunk of data equal to the queue size.
345 * Verify that the write is successful and the subsequent read
346 * returns the expected data.
347 */
348TEST_F(SynchronizedReadWrites, LargeInputTest1) {
349 std::vector<uint8_t> data(mNumMessagesMax);
350 for (size_t i = 0; i < mNumMessagesMax; i++) {
351 data[i] = i & 0xFF;
352 }
353
354 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
355 std::vector<uint8_t> readData(mNumMessagesMax);
356 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
357 ASSERT_EQ(data, readData);
358}
359
360/*
361 * Attempt to write a chunk of data larger than the queue size.
362 * Verify that it fails. Verify that a subsequent read fails and
363 * the queue is still empty.
364 */
365TEST_F(SynchronizedReadWrites, LargeInputTest2) {
366 ASSERT_EQ(0UL, mQueue->availableToRead());
367 const size_t dataLen = 4096;
368 ASSERT_GT(dataLen, mNumMessagesMax);
369 std::vector<uint8_t> data(dataLen);
370 for (size_t i = 0; i < dataLen; i++) {
371 data[i] = i & 0xFF;
372 }
373 ASSERT_FALSE(mQueue->write(&data[0], dataLen));
374 std::vector<uint8_t> readData(mNumMessagesMax);
375 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
376 ASSERT_NE(data, readData);
377 ASSERT_EQ(0UL, mQueue->availableToRead());
378}
379
380/*
381 * After the queue is full, try to write more data. Verify that
382 * the attempt returns false. Verify that the attempt did not
383 * affect the pre-existing data in the queue.
384 */
385TEST_F(SynchronizedReadWrites, LargeInputTest3) {
386 std::vector<uint8_t> data(mNumMessagesMax);
387 for (size_t i = 0; i < mNumMessagesMax; i++) {
388 data[i] = i & 0xFF;
389 }
390 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
391 ASSERT_FALSE(mQueue->write(&data[0], 1));
392 std::vector<uint8_t> readData(mNumMessagesMax);
393 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
394 ASSERT_EQ(data, readData);
395}
396
397/*
398 * Verify that multiple reads one after the other return expected data.
399 */
400TEST_F(SynchronizedReadWrites, MultipleRead) {
401 const size_t chunkSize = 100;
402 const size_t chunkNum = 5;
403 const size_t dataLen = chunkSize * chunkNum;
404 ASSERT_LE(dataLen, mNumMessagesMax);
405 uint8_t data[dataLen];
406 for (size_t i = 0; i < dataLen; i++) {
407 data[i] = i & 0xFF;
408 }
409 ASSERT_TRUE(mQueue->write(data, dataLen));
410 uint8_t readData[dataLen] = {};
411 for (size_t i = 0; i < chunkNum; i++) {
412 ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
413 }
414 ASSERT_EQ(0, memcmp(readData, data, dataLen));
415}
416
417/*
418 * Verify that multiple writes one after the other happens correctly.
419 */
420TEST_F(SynchronizedReadWrites, MultipleWrite) {
421 const int chunkSize = 100;
422 const int chunkNum = 5;
423 const size_t dataLen = chunkSize * chunkNum;
424 ASSERT_LE(dataLen, mNumMessagesMax);
425 uint8_t data[dataLen];
426 for (size_t i = 0; i < dataLen; i++) {
427 data[i] = i & 0xFF;
428 }
429 for (unsigned int i = 0; i < chunkNum; i++) {
430 ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
431 }
432 uint8_t readData[dataLen] = {};
433 ASSERT_TRUE(mQueue->read(readData, dataLen));
434 ASSERT_EQ(0, memcmp(readData, data, dataLen));
435}
436
437/*
438 * Write enough messages into the FMQ to fill half of it
439 * and read back the same.
440 * Write mNumMessagesMax messages into the queue. This will cause a
441 * wrap around. Read and verify the data.
442 */
443TEST_F(SynchronizedReadWrites, ReadWriteWrapAround) {
444 size_t numMessages = mNumMessagesMax / 2;
445 std::vector<uint8_t> data(mNumMessagesMax);
446 std::vector<uint8_t> readData(mNumMessagesMax);
447 for (size_t i = 0; i < mNumMessagesMax; i++) {
448 data[i] = i & 0xFF;
449 }
450 ASSERT_TRUE(mQueue->write(&data[0], numMessages));
451 ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
452 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
453 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
454 ASSERT_EQ(data, readData);
455}
456
457/*
458 * Verify that a few bytes of data can be successfully written and read.
459 */
460TEST_F(UnsynchronizedWrite, SmallInputTest1) {
461 const size_t dataLen = 16;
462 ASSERT_LE(dataLen, mNumMessagesMax);
463 uint8_t data[dataLen];
464 for (size_t i = 0; i < dataLen; i++) {
465 data[i] = i & 0xFF;
466 }
467 ASSERT_TRUE(mQueue->write(data, dataLen));
468 uint8_t readData[dataLen] = {};
469 ASSERT_TRUE(mQueue->read(readData, dataLen));
470 ASSERT_EQ(0, memcmp(data, readData, dataLen));
471}
472
473/*
474 * Verify that read() returns false when trying to read from an empty queue.
475 */
476TEST_F(UnsynchronizedWrite, ReadWhenEmpty) {
477 ASSERT_EQ(0UL, mQueue->availableToRead());
478 const size_t dataLen = 2;
479 ASSERT_TRUE(dataLen < mNumMessagesMax);
480 uint8_t readData[dataLen];
481 ASSERT_FALSE(mQueue->read(readData, dataLen));
482}
483
484/*
485 * Write the queue when full. Verify that a subsequent writes is succesful.
486 * Verify that availableToWrite() returns 0 as expected.
487 */
488
489TEST_F(UnsynchronizedWrite, WriteWhenFull) {
490 ASSERT_EQ(0UL, mQueue->availableToRead());
491 std::vector<uint8_t> data(mNumMessagesMax);
492 for (size_t i = 0; i < mNumMessagesMax; i++) {
493 data[i] = i & 0xFF;
494 }
495 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
496 ASSERT_EQ(0UL, mQueue->availableToWrite());
497 ASSERT_TRUE(mQueue->write(&data[0], 1));
498
499 std::vector<uint8_t> readData(mNumMessagesMax);
500 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
501}
502
503/*
504 * Write a chunk of data equal to the queue size.
505 * Verify that the write is successful and the subsequent read
506 * returns the expected data.
507 */
508TEST_F(UnsynchronizedWrite, LargeInputTest1) {
509 std::vector<uint8_t> data(mNumMessagesMax);
510 for (size_t i = 0; i < mNumMessagesMax; i++) {
511 data[i] = i & 0xFF;
512 }
513 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
514 std::vector<uint8_t> readData(mNumMessagesMax);
515 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
516 ASSERT_EQ(data, readData);
517}
518
519/*
520 * Attempt to write a chunk of data larger than the queue size.
521 * Verify that it fails. Verify that a subsequent read fails and
522 * the queue is still empty.
523 */
524TEST_F(UnsynchronizedWrite, LargeInputTest2) {
525 ASSERT_EQ(0UL, mQueue->availableToRead());
526 const size_t dataLen = 4096;
527 ASSERT_GT(dataLen, mNumMessagesMax);
528 std::vector<uint8_t> data(dataLen);
529 for (size_t i = 0; i < dataLen; i++) {
530 data[i] = i & 0xFF;
531 }
532 ASSERT_FALSE(mQueue->write(&data[0], dataLen));
533 std::vector<uint8_t> readData(mNumMessagesMax);
534 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
535 ASSERT_NE(data, readData);
536 ASSERT_EQ(0UL, mQueue->availableToRead());
537}
538
539/*
540 * After the queue is full, try to write more data. Verify that
541 * the attempt is succesful. Verify that the read fails
542 * as expected.
543 */
544TEST_F(UnsynchronizedWrite, LargeInputTest3) {
545 std::vector<uint8_t> data(mNumMessagesMax);
546 for (size_t i = 0; i < mNumMessagesMax; i++) {
547 data[i] = i & 0xFF;
548 }
549 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
550 ASSERT_TRUE(mQueue->write(&data[0], 1));
551 std::vector<uint8_t> readData(mNumMessagesMax);
552 ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
553}
554
555/*
556 * Verify that multiple reads one after the other return expected data.
557 */
558TEST_F(UnsynchronizedWrite, MultipleRead) {
559 const size_t chunkSize = 100;
560 const size_t chunkNum = 5;
561 const size_t dataLen = chunkSize * chunkNum;
562 ASSERT_LE(dataLen, mNumMessagesMax);
563 uint8_t data[dataLen];
564 for (size_t i = 0; i < dataLen; i++) {
565 data[i] = i & 0xFF;
566 }
567 ASSERT_TRUE(mQueue->write(data, dataLen));
568 uint8_t readData[dataLen] = {};
569 for (size_t i = 0; i < chunkNum; i++) {
570 ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
571 }
572 ASSERT_EQ(0, memcmp(readData, data, dataLen));
573}
574
575/*
576 * Verify that multiple writes one after the other happens correctly.
577 */
578TEST_F(UnsynchronizedWrite, MultipleWrite) {
579 const size_t chunkSize = 100;
580 const size_t chunkNum = 5;
581 const size_t dataLen = chunkSize * chunkNum;
582 ASSERT_LE(dataLen, mNumMessagesMax);
583 uint8_t data[dataLen];
584 for (size_t i = 0; i < dataLen; i++) {
585 data[i] = i & 0xFF;
586 }
587 for (size_t i = 0; i < chunkNum; i++) {
588 ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
589 }
590 uint8_t readData[dataLen] = {};
591 ASSERT_TRUE(mQueue->read(readData, dataLen));
592 ASSERT_EQ(0, memcmp(readData, data, dataLen));
593}
594
595/*
596 * Write enough messages into the FMQ to fill half of it
597 * and read back the same.
598 * Write mNumMessagesMax messages into the queue. This will cause a
599 * wrap around. Read and verify the data.
600 */
601TEST_F(UnsynchronizedWrite, ReadWriteWrapAround) {
602 size_t numMessages = mNumMessagesMax / 2;
603 std::vector<uint8_t> data(mNumMessagesMax);
604 std::vector<uint8_t> readData(mNumMessagesMax);
605 for (size_t i = 0; i < mNumMessagesMax; i++) {
606 data[i] = i & 0xFF;
607 }
608 ASSERT_TRUE(mQueue->write(&data[0], numMessages));
609 ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
610 ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
611 ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
612 ASSERT_EQ(data, readData);
613}