Primiano Tucci | d933d91 | 2018-09-04 09:15:07 +0100 | [diff] [blame] | 1 | /* |
Primiano Tucci | 89795fd | 2019-02-18 23:08:06 +0000 | [diff] [blame] | 2 | * Copyright (C) 2018 The Android Open Source Project |
Primiano Tucci | d933d91 | 2018-09-04 09:15:07 +0100 | [diff] [blame] | 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #ifndef SRC_TRACE_PROCESSOR_TRACE_SORTER_H_ |
| 18 | #define SRC_TRACE_PROCESSOR_TRACE_SORTER_H_ |
| 19 | |
Primiano Tucci | 81d0a39 | 2018-09-04 15:15:37 +0100 | [diff] [blame] | 20 | #include <vector> |
| 21 | |
Primiano Tucci | 2c5488f | 2019-06-01 03:27:28 +0100 | [diff] [blame] | 22 | #include "perfetto/ext/base/circular_queue.h" |
Ioannis Ilkos | eff38f5 | 2018-10-29 10:37:55 +0000 | [diff] [blame] | 23 | #include "perfetto/trace_processor/basic_types.h" |
Primiano Tucci | 3264b59 | 2021-11-08 18:20:51 +0000 | [diff] [blame^] | 24 | #include "perfetto/trace_processor/trace_blob_view.h" |
Lalit Maganti | 7010b33 | 2020-02-07 10:51:15 +0000 | [diff] [blame] | 25 | #include "src/trace_processor/storage/trace_storage.h" |
Eric Seckler | 1a2ea13 | 2019-10-16 11:35:31 +0100 | [diff] [blame] | 26 | #include "src/trace_processor/timestamped_trace_piece.h" |
Primiano Tucci | d933d91 | 2018-09-04 09:15:07 +0100 | [diff] [blame] | 27 | |
Deepanjan Roy | 01994ca | 2019-04-02 11:05:34 -0700 | [diff] [blame] | 28 | namespace Json { |
Eric Seckler | 1a2ea13 | 2019-10-16 11:35:31 +0100 | [diff] [blame] | 29 | class Value; |
Deepanjan Roy | 01994ca | 2019-04-02 11:05:34 -0700 | [diff] [blame] | 30 | } // namespace Json |
Deepanjan Roy | 01994ca | 2019-04-02 11:05:34 -0700 | [diff] [blame] | 31 | |
Primiano Tucci | d933d91 | 2018-09-04 09:15:07 +0100 | [diff] [blame] | 32 | namespace perfetto { |
| 33 | namespace trace_processor { |
| 34 | |
Eric Seckler | ee1cdea | 2019-10-22 12:13:06 +0100 | [diff] [blame] | 35 | class FuchsiaProviderView; |
Eric Seckler | 771960c | 2019-10-22 15:37:12 +0100 | [diff] [blame] | 36 | class PacketSequenceState; |
Lalit Maganti | 31afb82 | 2020-03-05 17:36:57 +0000 | [diff] [blame] | 37 | struct SystraceLine; |
Eric Seckler | ee1cdea | 2019-10-22 12:13:06 +0100 | [diff] [blame] | 38 | |
Primiano Tucci | 81d0a39 | 2018-09-04 15:15:37 +0100 | [diff] [blame] | 39 | // This class takes care of sorting events parsed from the trace stream in |
| 40 | // arbitrary order and pushing them to the next pipeline stages (parsing) in |
| 41 | // order. In order to support streaming use-cases, sorting happens within a |
Lalit Maganti | 09e840c | 2021-09-22 15:23:17 +0100 | [diff] [blame] | 42 | // window. |
Primiano Tucci | 81d0a39 | 2018-09-04 15:15:37 +0100 | [diff] [blame] | 43 | // |
Lalit Maganti | 09e840c | 2021-09-22 15:23:17 +0100 | [diff] [blame] | 44 | // Events are held in the TraceSorter staging area (events_) until either: |
| 45 | // 1. We can determine that it's safe to extract events by observing |
| 46 | // TracingServiceEvent Flush and ReadBuffer events |
| 47 | // 2. The trace EOF is reached |
| 48 | // |
| 49 | // Incremental extraction |
| 50 | // |
| 51 | // Incremental extraction happens by using a combination of flush and read |
| 52 | // buffer events from the tracing service. Note that incremental extraction |
| 53 | // is only applicable for write_into_file traces; ring-buffer traces will |
| 54 | // be sorted fully in-memory implicitly because there is only a single read |
| 55 | // buffer call at the end. |
| 56 | // |
| 57 | // The algorithm for incremental extraction is explained in detail at |
| 58 | // go/trace-sorting-is-complicated. |
| 59 | // |
| 60 | // Sorting algorithm |
| 61 | // |
| 62 | // The sorting algorithm is designed around the assumption that: |
Primiano Tucci | 7de4ef2 | 2019-02-21 17:13:42 +0000 | [diff] [blame] | 63 | // - Most events come from ftrace. |
| 64 | // - Ftrace events are sorted within each cpu most of the time. |
Primiano Tucci | 81d0a39 | 2018-09-04 15:15:37 +0100 | [diff] [blame] | 65 | // |
Primiano Tucci | 7de4ef2 | 2019-02-21 17:13:42 +0000 | [diff] [blame] | 66 | // Due to this, this class operates as a streaming merge-sort of N+1 queues |
| 67 | // (N = num cpus + 1 for non-ftrace events). Each queue in turn gets sorted (if |
| 68 | // necessary) before proceeding with the global merge-sort-extract. |
Lalit Maganti | 09e840c | 2021-09-22 15:23:17 +0100 | [diff] [blame] | 69 | // |
| 70 | // When an event is pushed through, it is just appended to the end of one of |
Primiano Tucci | 7de4ef2 | 2019-02-21 17:13:42 +0000 | [diff] [blame] | 71 | // the N queues. While appending, we keep track of the fact that the queue |
| 72 | // is still ordered or just lost ordering. When an out-of-order event is |
| 73 | // detected on a queue we keep track of: (1) the offset within the queue where |
| 74 | // the chaos began, (2) the timestamp that broke the ordering. |
Lalit Maganti | 09e840c | 2021-09-22 15:23:17 +0100 | [diff] [blame] | 75 | // |
Primiano Tucci | 7de4ef2 | 2019-02-21 17:13:42 +0000 | [diff] [blame] | 76 | // When we decide to extract events from the queues into the next stages of |
| 77 | // the trace processor, we re-sort the events in the queue. Rather than |
Primiano Tucci | 81d0a39 | 2018-09-04 15:15:37 +0100 | [diff] [blame] | 78 | // re-sorting everything all the time, we use the above knowledge to restrict |
| 79 | // sorting to the (hopefully smaller) tail of the |events_| staging area. |
| 80 | // At any time, the first partition of |events_| [0 .. sort_start_idx_) is |
| 81 | // ordered, and the second partition [sort_start_idx_.. end] is not. |
| 82 | // We use a logarithmic bound search operation to figure out what is the index |
| 83 | // within the first partition where sorting should start, and sort all events |
| 84 | // from there to the end. |
class TraceSorter {
 public:
  // Controls when events are extracted from the staging queues.
  // kDefault: incremental extraction driven by Flush/ReadBuffer service
  // events (see NotifyReadBufferEvent()). kFullSort: everything is held
  // until ExtractEventsForced() (e.g. at trace EOF).
  enum class SortingMode {
    kDefault,
    kFullSort,
  };

  TraceSorter(TraceProcessorContext* context,
              std::unique_ptr<TraceParser> parser,
              SortingMode);

  // Appends a non-ftrace trace packet (with its sequence state generation)
  // to the general queue (queues_[0]).
  inline void PushTracePacket(int64_t timestamp,
                              PacketSequenceState* state,
                              TraceBlobView packet) {
    AppendNonFtraceEvent(TimestampedTracePiece(timestamp, packet_idx_++,
                                               std::move(packet),
                                               state->current_generation()));
  }

  // Appends a JSON trace event (serialized value) to the general queue.
  inline void PushJsonValue(int64_t timestamp, std::string json_value) {
    AppendNonFtraceEvent(
        TimestampedTracePiece(timestamp, packet_idx_++, std::move(json_value)));
  }

  // Appends a Fuchsia trace record to the general queue.
  inline void PushFuchsiaRecord(int64_t timestamp,
                                std::unique_ptr<FuchsiaRecord> record) {
    AppendNonFtraceEvent(
        TimestampedTracePiece(timestamp, packet_idx_++, std::move(record)));
  }

  // Appends a parsed systrace text line to the general queue. The timestamp
  // is read from the line itself before ownership is transferred.
  inline void PushSystraceLine(std::unique_ptr<SystraceLine> systrace_line) {
    int64_t timestamp = systrace_line->ts;
    AppendNonFtraceEvent(TimestampedTracePiece(timestamp, packet_idx_++,
                                               std::move(systrace_line)));
  }

  // Appends a track event to the general queue.
  inline void PushTrackEventPacket(int64_t timestamp,
                                   std::unique_ptr<TrackEventData> data) {
    AppendNonFtraceEvent(
        TimestampedTracePiece(timestamp, packet_idx_++, std::move(data)));
  }

  // Appends an ftrace event to the per-CPU queue (queues_[cpu + 1]).
  inline void PushFtraceEvent(uint32_t cpu,
                              int64_t timestamp,
                              TraceBlobView event,
                              PacketSequenceState* state) {
    auto* queue = GetQueue(cpu + 1);
    queue->Append(TimestampedTracePiece(
        timestamp, packet_idx_++,
        FtraceEventData{std::move(event), state->current_generation()}));
    UpdateGlobalTs(queue);
  }

  // Appends a "compact" (inline, already-decoded) sched_switch event to the
  // per-CPU queue.
  inline void PushInlineFtraceEvent(uint32_t cpu,
                                    int64_t timestamp,
                                    InlineSchedSwitch inline_sched_switch) {
    // TODO(rsavitski): if a trace has a mix of normal & "compact" events (being
    // pushed through this function), the ftrace batches will no longer be fully
    // sorted by timestamp. In such situations, we will have to sort at the end
    // of the batch. We can do better as both sub-sequences are sorted however.
    // Consider adding extra queues, or pushing them in a merge-sort fashion
    // instead.
    auto* queue = GetQueue(cpu + 1);
    queue->Append(
        TimestampedTracePiece(timestamp, packet_idx_++, inline_sched_switch));
    UpdateGlobalTs(queue);
  }

  // Appends a "compact" sched_waking event to the per-CPU queue. Subject to
  // the same mixed-event ordering caveat as the sched_switch overload above.
  inline void PushInlineFtraceEvent(uint32_t cpu,
                                    int64_t timestamp,
                                    InlineSchedWaking inline_sched_waking) {
    auto* queue = GetQueue(cpu + 1);
    queue->Append(
        TimestampedTracePiece(timestamp, packet_idx_++, inline_sched_waking));
    UpdateGlobalTs(queue);
  }

  // Sorts and extracts *all* staged events (up to the current packet_idx_),
  // releasing the queues. Used at trace EOF (and for forced flushes).
  void ExtractEventsForced() {
    SortAndExtractEventsUntilPacket(packet_idx_);
    queues_.resize(0);

    packet_idx_for_extraction_ = packet_idx_;
    flushes_since_extraction_ = 0;
  }

  // Called on a TracingServiceEvent Flush; counts flushes seen since the last
  // incremental extraction (see NotifyReadBufferEvent()).
  void NotifyFlushEvent() { flushes_since_extraction_++; }

  // Called on a TracingServiceEvent ReadBuffer. If at least two flushes have
  // happened since the last extraction (and we are not in kFullSort mode),
  // incrementally extracts events up to the previously recorded packet index.
  // See go/trace-sorting-is-complicated for why two flushes are required.
  void NotifyReadBufferEvent() {
    if (sorting_mode_ == SortingMode::kFullSort ||
        flushes_since_extraction_ < 2) {
      return;
    }

    SortAndExtractEventsUntilPacket(packet_idx_for_extraction_);
    packet_idx_for_extraction_ = packet_idx_;
    flushes_since_extraction_ = 0;
  }

  // Largest timestamp seen across all queues so far.
  int64_t max_timestamp() const { return global_max_ts_; }

 private:
  static constexpr uint32_t kNoBatch = std::numeric_limits<uint32_t>::max();

  // One staging queue of events (queues_[0] = non-ftrace, queues_[1 + cpu] =
  // ftrace for that CPU). Tracks min/max timestamps and, when out-of-order
  // events arrive, the region [sort_start_idx_, end] that needs re-sorting.
  struct Queue {
    inline void Append(TimestampedTracePiece ttp) {
      const int64_t timestamp = ttp.timestamp;
      events_.emplace_back(std::move(ttp));
      min_ts_ = std::min(min_ts_, timestamp);

      // Events are often seen in order.
      if (PERFETTO_LIKELY(timestamp >= max_ts_)) {
        max_ts_ = timestamp;
      } else {
        // The event is breaking ordering. The first time it happens, keep
        // track of which index we are at. We know that everything before that
        // is sorted (because events were pushed monotonically). Everything
        // after that index, instead, will need a sorting pass before moving
        // events to the next pipeline stage.
        if (sort_start_idx_ == 0) {
          PERFETTO_DCHECK(events_.size() >= 2);
          sort_start_idx_ = events_.size() - 1;
          sort_min_ts_ = timestamp;
        } else {
          sort_min_ts_ = std::min(sort_min_ts_, timestamp);
        }
      }

      PERFETTO_DCHECK(min_ts_ <= max_ts_);
    }

    // True iff an out-of-order event has been appended since the last Sort().
    bool needs_sorting() const { return sort_start_idx_ != 0; }
    // Re-sorts the unsorted tail of |events_|; defined out-of-line.
    void Sort();

    base::CircularQueue<TimestampedTracePiece> events_;
    int64_t min_ts_ = std::numeric_limits<int64_t>::max();
    int64_t max_ts_ = 0;
    // Index of the first element of |events_| that may be out of order;
    // 0 means the whole queue is sorted.
    size_t sort_start_idx_ = 0;
    // Smallest timestamp seen among the out-of-order tail.
    int64_t sort_min_ts_ = std::numeric_limits<int64_t>::max();
  };

  // Merge-sort-extracts events with packet index < |limit_packet_idx| from all
  // queues and pushes them to the next pipeline stage; defined out-of-line.
  void SortAndExtractEventsUntilPacket(uint64_t limit_packet_idx);

  // Returns the queue at |index|, growing |queues_| on demand (a new CPU may
  // appear at any point in the trace).
  inline Queue* GetQueue(size_t index) {
    if (PERFETTO_UNLIKELY(index >= queues_.size()))
      queues_.resize(index + 1);
    return &queues_[index];
  }

  // Appends to the general (non-ftrace) queue and refreshes the global
  // min/max timestamps.
  inline void AppendNonFtraceEvent(TimestampedTracePiece ttp) {
    Queue* queue = GetQueue(0);
    queue->Append(std::move(ttp));
    UpdateGlobalTs(queue);
  }

  // Folds |queue|'s min/max timestamps into the global min/max.
  inline void UpdateGlobalTs(Queue* queue) {
    global_min_ts_ = std::min(global_min_ts_, queue->min_ts_);
    global_max_ts_ = std::max(global_max_ts_, queue->max_ts_);
  }

  // NOTE(review): name suggests this conditionally forwards |ttp| from
  // |queue_idx| to the next stage; see the .cc for the exact condition.
  void MaybePushEvent(size_t queue_idx,
                      TimestampedTracePiece ttp) PERFETTO_ALWAYS_INLINE;

  TraceProcessorContext* context_;
  std::unique_ptr<TraceParser> parser_;

  // Whether we should ignore incremental extraction and just wait for
  // forced extraction at the end of the trace.
  SortingMode sorting_mode_ = SortingMode::kDefault;

  // The packet index until which events should be extracted. Set based
  // on the packet index in |OnReadBuffer|.
  uint64_t packet_idx_for_extraction_ = 0;

  // The number of flushes which have happened since the last incremental
  // extraction.
  uint32_t flushes_since_extraction_ = 0;

  // queues_[0] is the general (non-ftrace) queue.
  // queues_[1] is the ftrace queue for CPU(0).
  // queues_[x] is the ftrace queue for CPU(x - 1).
  std::vector<Queue> queues_;

  // max(e.timestamp for e in queues_).
  int64_t global_max_ts_ = 0;

  // min(e.timestamp for e in queues_).
  int64_t global_min_ts_ = std::numeric_limits<int64_t>::max();

  // Monotonic increasing value used to index timestamped trace pieces.
  uint64_t packet_idx_ = 0;

  // Used for performance tests. True when setting TRACE_PROCESSOR_SORT_ONLY=1.
  bool bypass_next_stage_for_testing_ = false;

  // max(e.ts for e pushed to next stage)
  int64_t latest_pushed_event_ts_ = std::numeric_limits<int64_t>::min();
};
| 280 | |
| 281 | } // namespace trace_processor |
| 282 | } // namespace perfetto |
| 283 | |
| 284 | #endif // SRC_TRACE_PROCESSOR_TRACE_SORTER_H_ |