Merge "processor: Fix order of async events in json export"
diff --git a/src/trace_processor/export_json.cc b/src/trace_processor/export_json.cc
index ed6e5f8..1e78477 100644
--- a/src/trace_processor/export_json.cc
+++ b/src/trace_processor/export_json.cc
@@ -27,6 +27,7 @@
 #include <json/writer.h>
 #include <stdio.h>
 
+#include <algorithm>
 #include <cstring>
 #include <deque>
 #include <limits>
@@ -187,26 +188,146 @@
       if (label_filter_ && !label_filter_("traceEvents"))
         return;
 
-      // Pop end events with smaller or equal timestamps.
-      PopEndEvents(event["ts"].asInt64());
-
       DoWriteEvent(event);
     }
 
-    void PushEndEvent(const Json::Value& event) {
+    void AddAsyncBeginEvent(const Json::Value& event) {
       if (label_filter_ && !label_filter_("traceEvents"))
         return;
 
-      // Pop any end events that end before the new one.
-      PopEndEvents(event["ts"].asInt64() - 1);
+      async_begin_events_.push_back(event);
+    }
 
+    void AddAsyncInstantEvent(const Json::Value& event) {
+      if (label_filter_ && !label_filter_("traceEvents"))
+        return;
+
+      async_instant_events_.push_back(event);
+    }
+
+    void AddAsyncEndEvent(const Json::Value& event) {
+      if (label_filter_ && !label_filter_("traceEvents"))
+        return;
+
+      async_end_events_.push_back(event);
+    }
+
+    void SortAndEmitAsyncEvents() {
       // Catapult doesn't handle out-of-order begin/end events well, especially
       // when their timestamps are the same but their order is incorrect. Since
-      // our events are sorted by begin timestamp, we only have to reorder end
-      // events. We do this by buffering them into a stack, so that both begin &
-      // end events of potential child events have been emitted before we emit
-      // the end of a parent event.
-      end_events_.push_back(event);
+      // we process events sorted by begin timestamp, |async_begin_events_| and
+      // |async_instant_events_| are already sorted. We now only have to sort
+      // |async_end_events_| and merge-sort all events into a single sequence.
+
+      // Sort |async_end_events_|. Note that we should order by ascending
+      // timestamp, but in reverse-stable order. This way, a child slice's end
+      // is emitted before its parent's end event, even if both end events have
+      // the same timestamp. To accomplish this, we perform a stable sort in
+      // descending order and later iterate via reverse iterators.
+      struct {
+        bool operator()(const Json::Value& a, const Json::Value& b) const {
+          return a["ts"].asInt64() > b["ts"].asInt64();
+        }
+      } CompareEvents;
+      std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
+                       CompareEvents);
+
+      // Merge sort by timestamp. If events share the same timestamp, prefer
+      // instant events, then end events, so that old slices close before new
+      // ones are opened, while instant events remain at their deepest nesting
+      // level.
+      auto instant_event_it = async_instant_events_.begin();
+      auto end_event_it = async_end_events_.rbegin();
+      auto begin_event_it = async_begin_events_.begin();
+
+      auto has_instant_event = instant_event_it != async_instant_events_.end();
+      auto has_end_event = end_event_it != async_end_events_.rend();
+      auto has_begin_event = begin_event_it != async_begin_events_.end();
+
+      auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
+        DoWriteEvent(*instant_event_it);
+        instant_event_it++;
+        has_instant_event = instant_event_it != async_instant_events_.end();
+      };
+      auto emit_next_end = [&end_event_it, &has_end_event, this]() {
+        DoWriteEvent(*end_event_it);
+        end_event_it++;
+        has_end_event = end_event_it != async_end_events_.rend();
+      };
+      auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
+        DoWriteEvent(*begin_event_it);
+        begin_event_it++;
+        has_begin_event = begin_event_it != async_begin_events_.end();
+      };
+
+      auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
+                                       &emit_next_instant, &emit_next_end]() {
+        if ((*instant_event_it)["ts"].asInt64() <=
+            (*end_event_it)["ts"].asInt64()) {
+          emit_next_instant();
+        } else {
+          emit_next_end();
+        }
+      };
+      auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
+                                         &emit_next_instant,
+                                         &emit_next_begin]() {
+        if ((*instant_event_it)["ts"].asInt64() <=
+            (*begin_event_it)["ts"].asInt64()) {
+          emit_next_instant();
+        } else {
+          emit_next_begin();
+        }
+      };
+      auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
+                                     &emit_next_end, &emit_next_begin]() {
+        if ((*end_event_it)["ts"].asInt64() <=
+            (*begin_event_it)["ts"].asInt64()) {
+          emit_next_end();
+        } else {
+          emit_next_begin();
+        }
+      };
+
+      // While all three event lists are non-empty, emit the earliest event.
+      while (has_instant_event && has_end_event && has_begin_event) {
+        if ((*instant_event_it)["ts"].asInt64() <=
+            (*end_event_it)["ts"].asInt64()) {
+          emit_next_instant_or_begin();
+        } else {
+          emit_next_end_or_begin();
+        }
+      }
+
+      // Only instant and end events left.
+      while (has_instant_event && has_end_event) {
+        emit_next_instant_or_end();
+      }
+
+      // Only instant and begin events left.
+      while (has_instant_event && has_begin_event) {
+        emit_next_instant_or_begin();
+      }
+
+      // Only end and begin events left.
+      while (has_end_event && has_begin_event) {
+        emit_next_end_or_begin();
+      }
+
+      // Remaining instant events.
+      while (has_instant_event) {
+        emit_next_instant();
+      }
+
+      // Remaining end events.
+      while (has_end_event) {
+        emit_next_end();
+      }
+
+      // Remaining begin events.
+      while (has_begin_event) {
+        emit_next_begin();
+      }
     }
 
     void WriteMetadataEvent(const char* metadata_type,
@@ -300,7 +421,7 @@
     }
 
     void WriteFooter() {
-      PopEndEvents(std::numeric_limits<int64_t>::max());
+      SortAndEmitAsyncEvents();
 
       // Filter metadata entries.
       if (metadata_filter_) {
@@ -372,16 +493,6 @@
       first_event_ = false;
     }
 
-    void PopEndEvents(int64_t max_ts) {
-      while (!end_events_.empty()) {
-        int64_t ts = end_events_.back()["ts"].asInt64();
-        if (ts > max_ts)
-          break;
-        DoWriteEvent(end_events_.back());
-        end_events_.pop_back();
-      }
-    }
-
     OutputWriter* output_;
     ArgumentFilterPredicate argument_filter_;
     MetadataFilterPredicate metadata_filter_;
@@ -391,7 +502,9 @@
     Json::Value metadata_;
     std::string system_trace_data_;
     std::string user_trace_data_;
-    std::deque<Json::Value> end_events_;
+    std::vector<Json::Value> async_begin_events_;
+    std::vector<Json::Value> async_instant_events_;
+    std::vector<Json::Value> async_end_events_;
   };
 
   class ArgsBuilder {
@@ -794,10 +907,10 @@
 
         if (duration_ns == 0) {  // Instant async event.
           event["ph"] = "n";
-          writer_.WriteCommonEvent(event);
+          writer_.AddAsyncInstantEvent(event);
         } else {  // Async start and end.
           event["ph"] = "b";
-          writer_.WriteCommonEvent(event);
+          writer_.AddAsyncBeginEvent(event);
           // If the slice didn't finish, the duration may be negative. Don't
           // write the end event in this case.
           if (duration_ns > 0) {
@@ -812,7 +925,7 @@
                   (thread_instruction_count + thread_instruction_delta));
             }
             event["args"].clear();
-            writer_.PushEndEvent(event);
+            writer_.AddAsyncEndEvent(event);
           }
         }
       } else {
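
For illustration only (not part of the patch): a minimal, self-contained sketch of the three-way merge that SortAndEmitAsyncEvents() performs, under the assumptions the comments above document: begin and instant events arrive already sorted by timestamp, end events get a stable descending sort and are walked in reverse, and timestamp ties are resolved as instant, then end, then begin. The Event struct, field names and sample timestamps below are hypothetical.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Event {
  int64_t ts;
  char phase;  // 'b' = async begin, 'n' = async instant, 'e' = async end.
};

int main() {
  // Begin and instant events are assumed to already be in timestamp order,
  // mirroring |async_begin_events_| and |async_instant_events_| above.
  std::vector<Event> begins = {{10, 'b'}, {10, 'b'}, {15, 'b'}};
  std::vector<Event> instants = {{12, 'n'}};
  std::vector<Event> ends = {{20, 'e'}, {20, 'e'}, {25, 'e'}};

  // Stable-sort the ends in descending order and walk them in reverse, so
  // that among equal timestamps the later-buffered (innermost) end is
  // emitted first.
  std::stable_sort(ends.begin(), ends.end(),
                   [](const Event& a, const Event& b) { return a.ts > b.ts; });

  auto bi = begins.begin();
  auto ni = instants.begin();
  auto ei = ends.rbegin();
  while (bi != begins.end() || ni != instants.end() || ei != ends.rend()) {
    // Tie-break priority at equal timestamps: instant, then end, then begin.
    if (ni != instants.end() && (ei == ends.rend() || ni->ts <= ei->ts) &&
        (bi == begins.end() || ni->ts <= bi->ts)) {
      std::printf("%c %lld\n", ni->phase, static_cast<long long>(ni->ts));
      ++ni;
    } else if (ei != ends.rend() && (bi == begins.end() || ei->ts <= bi->ts)) {
      std::printf("%c %lld\n", ei->phase, static_cast<long long>(ei->ts));
      ++ei;
    } else {
      std::printf("%c %lld\n", bi->phase, static_cast<long long>(bi->ts));
      ++bi;
    }
  }
  return 0;
}

With the sample data above this prints: b 10, b 10, n 12, b 15, e 20, e 20, e 25, the ordering the exporter aims for.
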
diff --git a/src/trace_processor/export_json_unittest.cc b/src/trace_processor/export_json_unittest.cc
index 2c9ec19..6d149e6 100644
--- a/src/trace_processor/export_json_unittest.cc
+++ b/src/trace_processor/export_json_unittest.cc
@@ -886,10 +886,13 @@
 TEST_F(ExportJsonTest, AsyncEvents) {
   const int64_t kTimestamp = 10000000;
   const int64_t kDuration = 100000;
+  const int64_t kTimestamp3 = 10005000;
+  const int64_t kDuration3 = 100000;
   const uint32_t kProcessID = 100;
   const char* kCategory = "cat";
   const char* kName = "name";
   const char* kName2 = "name2";
+  const char* kName3 = "name3";
   const char* kArgName = "arg_name";
   const int kArgValue = 123;
 
@@ -897,11 +900,16 @@
   StringId cat_id = context_.storage->InternString(base::StringView(kCategory));
   StringId name_id = context_.storage->InternString(base::StringView(kName));
   StringId name2_id = context_.storage->InternString(base::StringView(kName2));
+  StringId name3_id = context_.storage->InternString(base::StringView(kName3));
 
   constexpr int64_t kSourceId = 235;
   TrackId track = context_.track_tracker->InternLegacyChromeAsyncTrack(
       name_id, upid, kSourceId, /*source_id_is_process_scoped=*/true,
       /*source_scope=*/kNullStringId);
+  constexpr int64_t kSourceId2 = 236;
+  TrackId track2 = context_.track_tracker->InternLegacyChromeAsyncTrack(
+      name3_id, upid, kSourceId2, /*source_id_is_process_scoped=*/true,
+      /*source_scope=*/kNullStringId);
   context_.args_tracker->Flush();  // Flush track args.
 
   context_.storage->mutable_slice_table()->Insert(
@@ -919,6 +927,10 @@
   context_.storage->mutable_slice_table()->Insert(
       {kTimestamp, kDuration, track.value, cat_id, name2_id, 0, 0, 0});
 
+  // Another overlapping async event on a different track.
+  context_.storage->mutable_slice_table()->Insert(
+      {kTimestamp3, kDuration3, track2.value, cat_id, name3_id, 0, 0, 0});
+
   base::TempFile temp_file = base::TempFile::Create();
   FILE* output = fopen(temp_file.path().c_str(), "w+");
   util::Status status = ExportJson(context_.storage.get(), output);
@@ -926,7 +938,10 @@
   EXPECT_TRUE(status.ok());
 
   Json::Value result = ToJsonValue(ReadFile(output));
-  EXPECT_EQ(result["traceEvents"].size(), 4u);
+  EXPECT_EQ(result["traceEvents"].size(), 6u);
+
+  // Events should be sorted by timestamp, with a child slice's end emitted
+  // before its parent's end.
 
   Json::Value begin_event1 = result["traceEvents"][0];
   EXPECT_EQ(begin_event1["ph"].asString(), "b");
@@ -951,19 +966,31 @@
   EXPECT_FALSE(begin_event2.isMember("tts"));
   EXPECT_FALSE(begin_event2.isMember("use_async_tts"));
 
+  Json::Value begin_event3 = result["traceEvents"][2];
+  EXPECT_EQ(begin_event3["ph"].asString(), "b");
+  EXPECT_EQ(begin_event3["ts"].asInt64(), kTimestamp3 / 1000);
+  EXPECT_EQ(begin_event3["pid"].asInt(), static_cast<int>(kProcessID));
+  EXPECT_EQ(begin_event3["id2"]["local"].asString(), "0xec");
+  EXPECT_EQ(begin_event3["cat"].asString(), kCategory);
+  EXPECT_EQ(begin_event3["name"].asString(), kName3);
+  EXPECT_TRUE(begin_event3["args"].isObject());
+  EXPECT_EQ(begin_event3["args"].size(), 0u);
+  EXPECT_FALSE(begin_event3.isMember("tts"));
+  EXPECT_FALSE(begin_event3.isMember("use_async_tts"));
+
   Json::Value end_event2 = result["traceEvents"][3];
   EXPECT_EQ(end_event2["ph"].asString(), "e");
   EXPECT_EQ(end_event2["ts"].asInt64(), (kTimestamp + kDuration) / 1000);
   EXPECT_EQ(end_event2["pid"].asInt(), static_cast<int>(kProcessID));
   EXPECT_EQ(end_event2["id2"]["local"].asString(), "0xeb");
   EXPECT_EQ(end_event2["cat"].asString(), kCategory);
-  EXPECT_EQ(end_event2["name"].asString(), kName);
+  EXPECT_EQ(end_event2["name"].asString(), kName2);
   EXPECT_TRUE(end_event2["args"].isObject());
   EXPECT_EQ(end_event2["args"].size(), 0u);
   EXPECT_FALSE(end_event2.isMember("tts"));
   EXPECT_FALSE(end_event2.isMember("use_async_tts"));
 
-  Json::Value end_event1 = result["traceEvents"][3];
+  Json::Value end_event1 = result["traceEvents"][4];
   EXPECT_EQ(end_event1["ph"].asString(), "e");
   EXPECT_EQ(end_event1["ts"].asInt64(), (kTimestamp + kDuration) / 1000);
   EXPECT_EQ(end_event1["pid"].asInt(), static_cast<int>(kProcessID));
@@ -974,6 +1001,18 @@
   EXPECT_EQ(end_event1["args"].size(), 0u);
   EXPECT_FALSE(end_event1.isMember("tts"));
   EXPECT_FALSE(end_event1.isMember("use_async_tts"));
+
+  Json::Value end_event3 = result["traceEvents"][5];
+  EXPECT_EQ(end_event3["ph"].asString(), "e");
+  EXPECT_EQ(end_event3["ts"].asInt64(), (kTimestamp3 + kDuration3) / 1000);
+  EXPECT_EQ(end_event3["pid"].asInt(), static_cast<int>(kProcessID));
+  EXPECT_EQ(end_event3["id2"]["local"].asString(), "0xec");
+  EXPECT_EQ(end_event3["cat"].asString(), kCategory);
+  EXPECT_EQ(end_event3["name"].asString(), kName3);
+  EXPECT_TRUE(end_event3["args"].isObject());
+  EXPECT_EQ(end_event3["args"].size(), 0u);
+  EXPECT_FALSE(end_event3.isMember("tts"));
+  EXPECT_FALSE(end_event3.isMember("use_async_tts"));
 }
 
 TEST_F(ExportJsonTest, AsyncEventWithThreadTimestamp) {
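
For reference, the event order the updated AsyncEvents test expects (derived from the assertions above; only fields asserted in the visible hunks are listed):

  [0] ph "b", ts kTimestamp / 1000
  [1] ph "b", ts kTimestamp / 1000
  [2] ph "b", ts kTimestamp3 / 1000, name kName3
  [3] ph "e", ts (kTimestamp + kDuration) / 1000, name kName2   (child end first)
  [4] ph "e", ts (kTimestamp + kDuration) / 1000
  [5] ph "e", ts (kTimestamp3 + kDuration3) / 1000, name kName3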