/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include "perfetto/ext/tracing/core/startup_trace_writer.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>

#include "perfetto/ext/tracing/core/startup_trace_writer_registry.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/ext/tracing/core/tracing_service.h"
#include "src/base/test/gtest_test_suite.h"
#include "src/base/test/test_task_runner.h"
#include "src/tracing/core/patch_list.h"
#include "src/tracing/core/shared_memory_arbiter_impl.h"
#include "src/tracing/core/sliced_protobuf_input_stream.h"
#include "src/tracing/core/trace_buffer.h"
#include "src/tracing/test/aligned_buffer_test.h"
#include "src/tracing/test/fake_producer_endpoint.h"
#include "test/gtest_and_gmock.h"

#include "protos/perfetto/trace/test_event.pbzero.h"
#include "protos/perfetto/trace/trace_packet.pb.h"
#include "protos/perfetto/trace/trace_packet.pbzero.h"
Eric Secklerc65693d2019-01-11 15:12:48 +000035
36namespace perfetto {
Eric Secklerc65693d2019-01-11 15:12:48 +000037
38class StartupTraceWriterTest : public AlignedBufferTest {
39 public:
40 void SetUp() override {
41 SharedMemoryArbiterImpl::set_default_layout_for_testing(
Eric Seckler06375782019-07-10 14:16:58 +010042 SharedMemoryABI::PageLayout::kPageDiv1);
Eric Secklerc65693d2019-01-11 15:12:48 +000043 AlignedBufferTest::SetUp();
44 task_runner_.reset(new base::TestTaskRunner());
45 arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
46 &fake_producer_endpoint_,
47 task_runner_.get()));
48 }
49
50 void TearDown() override {
51 arbiter_.reset();
52 task_runner_.reset();
53 }
54
Eric Seckler42777e52019-01-23 10:13:22 +000055 std::unique_ptr<StartupTraceWriter> CreateUnboundWriter() {
56 std::shared_ptr<StartupTraceWriterRegistryHandle> registry;
Eric Seckler3d99b0f2019-08-09 15:15:19 +010057 return std::unique_ptr<StartupTraceWriter>(
58 new StartupTraceWriter(registry, BufferExhaustedPolicy::kDrop));
Eric Seckler42777e52019-01-23 10:13:22 +000059 }
60
Eric Seckler06375782019-07-10 14:16:58 +010061 bool BindWriter(StartupTraceWriter* writer, size_t chunks_per_batch = 0) {
Eric Seckler42777e52019-01-23 10:13:22 +000062 const BufferID kBufId = 42;
Eric Seckler06375782019-07-10 14:16:58 +010063 return writer->BindToArbiter(arbiter_.get(), kBufId, chunks_per_batch);
Eric Seckler42777e52019-01-23 10:13:22 +000064 }
65
Eric Secklerc65693d2019-01-11 15:12:48 +000066 void WritePackets(StartupTraceWriter* writer, size_t packet_count) {
67 for (size_t i = 0; i < packet_count; i++) {
68 auto packet = writer->NewTracePacket();
Eric Seckler42777e52019-01-23 10:13:22 +000069 packet->set_for_testing()->set_str(kPacketPayload);
Eric Secklerc65693d2019-01-11 15:12:48 +000070 }
71 }
72
Eric Seckler06375782019-07-10 14:16:58 +010073 size_t CountCompleteChunksInSMB() {
74 SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
75 size_t num_complete_chunks = 0;
76 for (size_t page_idx = 0; page_idx < kNumPages; page_idx++) {
77 uint32_t page_layout = abi->GetPageLayout(page_idx);
78 size_t num_chunks = SharedMemoryABI::GetNumChunksForLayout(page_layout);
79 for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
80 auto chunk_state = abi->GetChunkState(page_idx, chunk_idx);
Eric Seckler06375782019-07-10 14:16:58 +010081 if (chunk_state == SharedMemoryABI::kChunkComplete)
82 num_complete_chunks++;
83 }
84 }
85 return num_complete_chunks;
86 }
87
Eric Seckler42777e52019-01-23 10:13:22 +000088 void VerifyPackets(size_t expected_count) {
Eric Secklerc65693d2019-01-11 15:12:48 +000089 SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
Eric Seckler42777e52019-01-23 10:13:22 +000090 auto buffer = TraceBuffer::Create(abi->size());
91
92 size_t total_packets_count = 0;
Eric Secklerc65693d2019-01-11 15:12:48 +000093 ChunkID current_max_chunk_id = 0;
94 for (size_t page_idx = 0; page_idx < kNumPages; page_idx++) {
95 uint32_t page_layout = abi->GetPageLayout(page_idx);
96 size_t num_chunks = SharedMemoryABI::GetNumChunksForLayout(page_layout);
97 for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
98 auto chunk_state = abi->GetChunkState(page_idx, chunk_idx);
99 ASSERT_TRUE(chunk_state == SharedMemoryABI::kChunkFree ||
100 chunk_state == SharedMemoryABI::kChunkComplete);
101 auto chunk = abi->TryAcquireChunkForReading(page_idx, chunk_idx);
102 if (!chunk.is_valid())
103 continue;
104
105 // Should only see new chunks with IDs larger than the previous read
106 // since our reads and writes are serialized.
107 ChunkID chunk_id = chunk.header()->chunk_id.load();
Eric Seckler4a0ee4e2019-01-15 08:37:46 +0000108 if (last_read_max_chunk_id_ != 0) {
Eric Secklerc65693d2019-01-11 15:12:48 +0000109 EXPECT_LT(last_read_max_chunk_id_, chunk_id);
Eric Seckler4a0ee4e2019-01-15 08:37:46 +0000110 }
Eric Secklerc65693d2019-01-11 15:12:48 +0000111 current_max_chunk_id = std::max(current_max_chunk_id, chunk_id);
112
113 auto packets_header = chunk.header()->packets.load();
Eric Seckler42777e52019-01-23 10:13:22 +0000114 total_packets_count += packets_header.count;
Eric Secklerc65693d2019-01-11 15:12:48 +0000115 if (packets_header.flags &
116 SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk) {
117 // Don't count fragmented packets twice.
Eric Seckler42777e52019-01-23 10:13:22 +0000118 total_packets_count--;
Eric Secklerc65693d2019-01-11 15:12:48 +0000119 }
Eric Seckler42777e52019-01-23 10:13:22 +0000120
121 buffer->CopyChunkUntrusted(
122 /*producer_id_trusted=*/1, /*producer_uid_trusted=*/1,
123 chunk.header()->writer_id.load(), chunk_id, packets_header.count,
124 packets_header.flags, /*chunk_complete=*/true,
125 chunk.payload_begin(), chunk.payload_size());
Eric Secklerc65693d2019-01-11 15:12:48 +0000126 abi->ReleaseChunkAsFree(std::move(chunk));
127 }
128 }
129 last_read_max_chunk_id_ = current_max_chunk_id;
Eric Seckler42777e52019-01-23 10:13:22 +0000130 EXPECT_EQ(expected_count, total_packets_count);
131
132 // Now verify chunk and packet contents.
133 buffer->BeginRead();
134 size_t num_packets_read = 0;
135 while (true) {
136 TracePacket packet;
Eric Secklerd0ac7ca2019-02-06 09:13:45 +0000137 TraceBuffer::PacketSequenceProperties sequence_properties{};
Eric Seckler062be902019-03-08 17:05:12 +0000138 bool previous_packet_dropped;
139 if (!buffer->ReadNextTracePacket(&packet, &sequence_properties,
140 &previous_packet_dropped)) {
Eric Seckler42777e52019-01-23 10:13:22 +0000141 break;
Eric Seckler062be902019-03-08 17:05:12 +0000142 }
Eric Secklerd0ac7ca2019-02-06 09:13:45 +0000143 EXPECT_EQ(static_cast<uid_t>(1),
144 sequence_properties.producer_uid_trusted);
Eric Seckler42777e52019-01-23 10:13:22 +0000145
146 SlicedProtobufInputStream stream(&packet.slices());
147 size_t size = 0;
148 for (const Slice& slice : packet.slices())
149 size += slice.size;
150 protos::TracePacket parsed_packet;
151 bool success = parsed_packet.ParseFromBoundedZeroCopyStream(
152 &stream, static_cast<int>(size));
153 EXPECT_TRUE(success);
154 if (!success)
155 break;
156 EXPECT_TRUE(parsed_packet.has_for_testing());
157 EXPECT_EQ(kPacketPayload, parsed_packet.for_testing().str());
158 num_packets_read++;
159 }
160 EXPECT_EQ(expected_count, num_packets_read);
Eric Secklerc65693d2019-01-11 15:12:48 +0000161 }
162
Eric Seckler42777e52019-01-23 10:13:22 +0000163 size_t GetUnboundWriterCount(
164 const StartupTraceWriterRegistry& registry) const {
165 return registry.unbound_writers_.size() +
166 registry.unbound_owned_writers_.size();
167 }
168
169 size_t GetBindingRegistriesCount(
170 const SharedMemoryArbiterImpl& arbiter) const {
171 return arbiter.startup_trace_writer_registries_.size();
172 }
173
174 size_t GetUnboundWriterCount(const SharedMemoryArbiterImpl& arbiter) const {
175 size_t count = 0u;
176 for (const auto& reg : arbiter.startup_trace_writer_registries_) {
177 count += reg->unbound_writers_.size();
178 count += reg->unbound_owned_writers_.size();
179 }
180 return count;
181 }
182
183 protected:
184 static constexpr char kPacketPayload[] = "foo";
185
Eric Secklerc65693d2019-01-11 15:12:48 +0000186 FakeProducerEndpoint fake_producer_endpoint_;
187 std::unique_ptr<base::TestTaskRunner> task_runner_;
188 std::unique_ptr<SharedMemoryArbiterImpl> arbiter_;
189 std::function<void(const std::vector<uint32_t>&)> on_pages_complete_;
190
191 ChunkID last_read_max_chunk_id_ = 0;
192};
193
Eric Seckler42777e52019-01-23 10:13:22 +0000194constexpr char StartupTraceWriterTest::kPacketPayload[];
195
196namespace {
197
Eric Seckler06375782019-07-10 14:16:58 +0100198size_t const kPageSizes[] = {4096, 32768};
Victor Costan4da8c672019-03-01 02:40:49 -0800199INSTANTIATE_TEST_SUITE_P(PageSize,
200 StartupTraceWriterTest,
201 ::testing::ValuesIn(kPageSizes));
Eric Secklerc65693d2019-01-11 15:12:48 +0000202
203TEST_P(StartupTraceWriterTest, CreateUnboundAndBind) {
Eric Seckler42777e52019-01-23 10:13:22 +0000204 auto writer = CreateUnboundWriter();
Eric Secklerc65693d2019-01-11 15:12:48 +0000205
Eric Seckler42777e52019-01-23 10:13:22 +0000206 // Bind writer right away without having written any data before.
207 EXPECT_TRUE(BindWriter(writer.get()));
Eric Secklerc65693d2019-01-11 15:12:48 +0000208
209 const size_t kNumPackets = 32;
210 WritePackets(writer.get(), kNumPackets);
211 // Finalizes the last packet and returns the chunk.
212 writer.reset();
213
Eric Seckler42777e52019-01-23 10:13:22 +0000214 VerifyPackets(kNumPackets);
Eric Secklerc65693d2019-01-11 15:12:48 +0000215}
216
217TEST_P(StartupTraceWriterTest, CreateBound) {
218 // Create a bound writer immediately.
219 const BufferID kBufId = 42;
220 std::unique_ptr<StartupTraceWriter> writer(
221 new StartupTraceWriter(arbiter_->CreateTraceWriter(kBufId)));
222
223 const size_t kNumPackets = 32;
224 WritePackets(writer.get(), kNumPackets);
225 // Finalizes the last packet and returns the chunk.
226 writer.reset();
227
Eric Seckler42777e52019-01-23 10:13:22 +0000228 VerifyPackets(kNumPackets);
Eric Secklerc65693d2019-01-11 15:12:48 +0000229}
230
231TEST_P(StartupTraceWriterTest, WriteWhileUnboundAndDiscard) {
Eric Seckler42777e52019-01-23 10:13:22 +0000232 auto writer = CreateUnboundWriter();
Eric Secklerc65693d2019-01-11 15:12:48 +0000233
234 const size_t kNumPackets = 32;
235 WritePackets(writer.get(), kNumPackets);
236
237 // Should discard the written data.
238 writer.reset();
239
Eric Seckler42777e52019-01-23 10:13:22 +0000240 VerifyPackets(0);
Eric Secklerc65693d2019-01-11 15:12:48 +0000241}
242
243TEST_P(StartupTraceWriterTest, WriteWhileUnboundAndBind) {
Eric Seckler42777e52019-01-23 10:13:22 +0000244 auto writer = CreateUnboundWriter();
Eric Secklerc65693d2019-01-11 15:12:48 +0000245
246 const size_t kNumPackets = 32;
247 WritePackets(writer.get(), kNumPackets);
248
249 // Binding the writer should cause the previously written packets to be
250 // written to the SMB and committed.
Eric Seckler42777e52019-01-23 10:13:22 +0000251 EXPECT_TRUE(BindWriter(writer.get()));
Eric Secklerc65693d2019-01-11 15:12:48 +0000252
Eric Seckler42777e52019-01-23 10:13:22 +0000253 VerifyPackets(kNumPackets);
Eric Secklerc65693d2019-01-11 15:12:48 +0000254
255 // Any further packets should be written to the SMB directly.
256 const size_t kNumAdditionalPackets = 16;
257 WritePackets(writer.get(), kNumAdditionalPackets);
258 // Finalizes the last packet and returns the chunk.
259 writer.reset();
260
Eric Seckler42777e52019-01-23 10:13:22 +0000261 VerifyPackets(kNumAdditionalPackets);
Eric Secklerc65693d2019-01-11 15:12:48 +0000262}
263
264TEST_P(StartupTraceWriterTest, WriteMultipleChunksWhileUnboundAndBind) {
Eric Seckler42777e52019-01-23 10:13:22 +0000265 auto writer = CreateUnboundWriter();
Eric Secklerc65693d2019-01-11 15:12:48 +0000266
267 // Write a single packet to determine its size in the buffer.
268 WritePackets(writer.get(), 1);
269 size_t packet_size = writer->used_buffer_size();
270
271 // Write at least 3 pages worth of packets.
272 const size_t kNumPackets = (page_size() * 3 + packet_size - 1) / packet_size;
273 WritePackets(writer.get(), kNumPackets);
274
275 // Binding the writer should cause the previously written packets to be
276 // written to the SMB and committed.
Eric Seckler42777e52019-01-23 10:13:22 +0000277 EXPECT_TRUE(BindWriter(writer.get()));
Eric Secklerc65693d2019-01-11 15:12:48 +0000278
Eric Seckler42777e52019-01-23 10:13:22 +0000279 VerifyPackets(kNumPackets + 1);
Eric Secklerc65693d2019-01-11 15:12:48 +0000280
281 // Any further packets should be written to the SMB directly.
282 const size_t kNumAdditionalPackets = 16;
283 WritePackets(writer.get(), kNumAdditionalPackets);
284 // Finalizes the last packet and returns the chunk.
285 writer.reset();
286
Eric Seckler42777e52019-01-23 10:13:22 +0000287 VerifyPackets(kNumAdditionalPackets);
Eric Secklerc65693d2019-01-11 15:12:48 +0000288}
289
290TEST_P(StartupTraceWriterTest, BindingWhileWritingFails) {
Eric Seckler42777e52019-01-23 10:13:22 +0000291 auto writer = CreateUnboundWriter();
Eric Secklerc65693d2019-01-11 15:12:48 +0000292
Eric Secklerc65693d2019-01-11 15:12:48 +0000293 {
Eric Seckler42777e52019-01-23 10:13:22 +0000294 // Begin a write by opening a TracePacket.
Eric Secklerc65693d2019-01-11 15:12:48 +0000295 auto packet = writer->NewTracePacket();
Eric Seckler42777e52019-01-23 10:13:22 +0000296 packet->set_for_testing()->set_str(kPacketPayload);
Eric Secklerc65693d2019-01-11 15:12:48 +0000297
298 // Binding while writing should fail.
Eric Seckler42777e52019-01-23 10:13:22 +0000299 EXPECT_FALSE(BindWriter(writer.get()));
Eric Secklerc65693d2019-01-11 15:12:48 +0000300 }
301
302 // Packet was completed, so binding should work now and emit the packet.
Eric Seckler42777e52019-01-23 10:13:22 +0000303 EXPECT_TRUE(BindWriter(writer.get()));
304 VerifyPackets(1);
305}
306
307TEST_P(StartupTraceWriterTest, CreateAndBindViaRegistry) {
308 std::unique_ptr<StartupTraceWriterRegistry> registry(
309 new StartupTraceWriterRegistry());
310
311 // Create unbound writers.
Eric Seckler3d99b0f2019-08-09 15:15:19 +0100312 auto writer1 =
313 registry->CreateUnboundTraceWriter(BufferExhaustedPolicy::kDrop);
314 auto writer2 =
315 registry->CreateUnboundTraceWriter(BufferExhaustedPolicy::kDrop);
Eric Seckler42777e52019-01-23 10:13:22 +0000316
317 EXPECT_EQ(2u, GetUnboundWriterCount(*registry));
318
319 // Return |writer2|. It should be kept alive until the registry is bound.
Eric Seckler75ed5502019-05-28 16:20:51 +0100320 StartupTraceWriter::ReturnToRegistry(std::move(writer2));
Eric Seckler42777e52019-01-23 10:13:22 +0000321
322 {
323 // Begin a write by opening a TracePacket on |writer1|.
324 auto packet = writer1->NewTracePacket();
325
326 // Binding |writer1| writing should fail, but |writer2| should be bound.
327 const BufferID kBufId = 42;
328 arbiter_->BindStartupTraceWriterRegistry(std::move(registry), kBufId);
329 EXPECT_EQ(1u, GetUnboundWriterCount(*arbiter_));
330 }
331
332 // Wait for |writer1| to be bound and the registry to be deleted.
333 auto checkpoint_name = "all_bound";
334 auto all_bound = task_runner_->CreateCheckpoint(checkpoint_name);
335 std::function<void()> task;
336 task = [&task, &all_bound, this]() {
337 if (!GetBindingRegistriesCount(*arbiter_)) {
338 all_bound();
339 return;
340 }
341 task_runner_->PostDelayedTask(task, 1);
342 };
343 task_runner_->PostDelayedTask(task, 1);
344 task_runner_->RunUntilCheckpoint(checkpoint_name);
Eric Seckler75ed5502019-05-28 16:20:51 +0100345
346 StartupTraceWriter::ReturnToRegistry(std::move(writer1));
Eric Secklerc65693d2019-01-11 15:12:48 +0000347}
348
Eric Seckler06375782019-07-10 14:16:58 +0100349TEST_P(StartupTraceWriterTest, BindAndCommitInBatches) {
350 auto writer = CreateUnboundWriter();
351
352 // Write a single packet to determine its size in the buffer.
353 WritePackets(writer.get(), 1);
354 size_t packet_size = writer->used_buffer_size();
355
356 // Write at least 3 pages/chunks worth of packets.
357 const size_t kNumPackets = (page_size() * 3 + packet_size - 1) / packet_size;
358 WritePackets(writer.get(), kNumPackets);
359
360 static constexpr size_t kChunksPerBatch = 2;
361
362 // Binding the writer with a batch size of 2 chunks should cause the first 2
363 // chunks of previously written packets to be written to the SMB and
364 // committed. The remaining chunks will be written when the
365 // |commit_data_callback| is executed.
366 EXPECT_TRUE(BindWriter(writer.get(), kChunksPerBatch));
367
368 EXPECT_EQ(
369 fake_producer_endpoint_.last_commit_data_request.chunks_to_move().size(),
370 kChunksPerBatch);
371 EXPECT_EQ(CountCompleteChunksInSMB(), kChunksPerBatch);
372 auto commit_data_callback = fake_producer_endpoint_.last_commit_data_callback;
373 EXPECT_TRUE(commit_data_callback);
374
375 // Send a commit with a single packet from the bound trace writer before the
376 // remaining chunk batches of the buffered data are written.
377 const size_t kNumAdditionalPackets = 1;
378 WritePackets(writer.get(), 1);
379 // Finalizes the packet and returns the chunk.
380 writer.reset();
381
382 // The packet should fit into a chunk.
383 EXPECT_EQ(
384 fake_producer_endpoint_.last_commit_data_request.chunks_to_move().size(),
Ryan Savitski45b6e592019-07-11 13:36:20 +0100385 1u);
Eric Seckler06375782019-07-10 14:16:58 +0100386 EXPECT_EQ(CountCompleteChunksInSMB(), kChunksPerBatch + 1);
387
388 // Write and commit the remaining chunks to the SMB.
389 while (commit_data_callback) {
390 commit_data_callback();
391 commit_data_callback = fake_producer_endpoint_.last_commit_data_callback;
392 }
393
394 // Verify that all chunks + packets are in the SMB.
395 VerifyPackets(1 + kNumPackets + kNumAdditionalPackets);
396}
397
Eric Secklerb52d52a2019-07-24 14:55:40 +0100398TEST_P(StartupTraceWriterTest, BindAndCommitInBatchesWithSMBExhaustion) {
399 auto writer = CreateUnboundWriter();
400
401 // Write a single packet to determine its size in the buffer.
402 WritePackets(writer.get(), 1);
403 size_t packet_size = writer->used_buffer_size();
404
405 // Write at least 3 pages/chunks worth of packets.
406 const size_t kNumPackets = (page_size() * 3 + packet_size - 1) / packet_size;
407 WritePackets(writer.get(), kNumPackets);
408
409 // Acquire all chunks in the SMB.
410 static constexpr size_t kTotChunks = kNumPages;
411 SharedMemoryABI::Chunk chunks[kTotChunks];
412 for (size_t i = 0; i < kTotChunks; i++) {
Eric Seckler3d99b0f2019-08-09 15:15:19 +0100413 chunks[i] = arbiter_->GetNewChunk({}, BufferExhaustedPolicy::kDrop);
Eric Secklerb52d52a2019-07-24 14:55:40 +0100414 ASSERT_TRUE(chunks[i].is_valid());
415 }
416
417 // Binding the writer should fail if no chunks are available in the SMB.
418 static constexpr size_t kChunksPerBatch = 2;
419 EXPECT_FALSE(BindWriter(writer.get(), kChunksPerBatch));
420
421 // Return and free the first chunk, so that there is only a single free chunk.
422 PatchList ignored;
423 arbiter_->ReturnCompletedChunk(std::move(chunks[0]), 0, &ignored);
424 chunks[0] =
425 arbiter_->shmem_abi_for_testing()->TryAcquireChunkForReading(0, 0);
426 ASSERT_TRUE(chunks[0].is_valid());
427 arbiter_->shmem_abi_for_testing()->ReleaseChunkAsFree(std::move(chunks[0]));
428 arbiter_->FlushPendingCommitDataRequests();
429
430 // Binding the writer should only cause the first chunks of previously written
431 // packets to be written to the SMB and committed because no further chunks
432 // are available in the SMB. The remaining chunks will be written when the
433 // |commit_data_callback| is executed.
434 EXPECT_TRUE(BindWriter(writer.get(), kChunksPerBatch));
435
436 EXPECT_EQ(
437 fake_producer_endpoint_.last_commit_data_request.chunks_to_move().size(),
438 1u);
439 EXPECT_EQ(CountCompleteChunksInSMB(), 1u);
440 auto commit_data_callback = fake_producer_endpoint_.last_commit_data_callback;
441 EXPECT_TRUE(commit_data_callback);
442
443 // Free up the other SMB chunks.
444 for (size_t i = 1; i < kTotChunks; i++) {
445 arbiter_->ReturnCompletedChunk(std::move(chunks[i]), 0, &ignored);
446 chunks[i] =
447 arbiter_->shmem_abi_for_testing()->TryAcquireChunkForReading(i, 0);
448 ASSERT_TRUE(chunks[i].is_valid());
449 arbiter_->shmem_abi_for_testing()->ReleaseChunkAsFree(std::move(chunks[i]));
450 }
451 arbiter_->FlushPendingCommitDataRequests();
452
453 // Write and commit the remaining buffered startup writer data to the SMB.
454 while (commit_data_callback) {
455 commit_data_callback();
456 commit_data_callback = fake_producer_endpoint_.last_commit_data_callback;
457 }
458 EXPECT_GT(
459 fake_producer_endpoint_.last_commit_data_request.chunks_to_move().size(),
460 0u);
461
462 // Verify that all chunks + packets are in the SMB.
463 VerifyPackets(1 + kNumPackets);
464}
465
Eric Secklerc65693d2019-01-11 15:12:48 +0000466} // namespace
467} // namespace perfetto