Add a function to build on-demand flamegraphs for heap graphs.

Instead of writing flamegraph data into the stack_profile_* tables and a
statically registered experimental_flamegraph_nodes table when a heap
graph sequence is finalized, keep the finished HeapGraphWalker keyed by
(upid, ts) and expose HeapGraphTracker::BuildFlamegraph(), which builds an
ExperimentalFlamegraphNodesTable on request. The table now carries depth,
name, map_name and parent_id directly instead of a callsite_id, and the
static table instance is removed from TraceStorage. Also adds a unit test
for BuildFlamegraph.
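
A minimal sketch of the intended call site, assuming a caller that already
has the tracker and a (upid, ts) pair; everything around BuildFlamegraph()
below is hypothetical, only BuildFlamegraph() itself is added here:

  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> flame =
      heap_graph_tracker->BuildFlamegraph(upid, ts);
  if (!flame)
    return;  // No heap graph was finalized for this (upid, ts) pair.
  // Each row is one class node: name, map_name, depth and parent_id give
  // the tree shape; count/size and cumulative_count/cumulative_size give
  // the self and subtree weights.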
Change-Id: If83cda516d288f18a95d7fbd13f3b08d12fc56ec
diff --git a/Android.bp b/Android.bp
index d721882..2e749e4 100644
--- a/Android.bp
+++ b/Android.bp
@@ -6096,6 +6096,7 @@
     "src/trace_processor/heap_profile_tracker_unittest.cc",
     "src/trace_processor/importers/fuchsia/fuchsia_trace_utils_unittest.cc",
     "src/trace_processor/importers/proto/args_table_utils_unittest.cc",
+    "src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc",
     "src/trace_processor/importers/proto/heap_graph_walker_unittest.cc",
     "src/trace_processor/importers/proto/proto_trace_parser_unittest.cc",
     "src/trace_processor/importers/systrace/systrace_parser_unittest.cc",
diff --git a/src/trace_processor/BUILD.gn b/src/trace_processor/BUILD.gn
index 0455917..df0efd5 100644
--- a/src/trace_processor/BUILD.gn
+++ b/src/trace_processor/BUILD.gn
@@ -371,6 +371,7 @@
     "ftrace_utils_unittest.cc",
     "heap_profile_tracker_unittest.cc",
     "importers/proto/args_table_utils_unittest.cc",
+    "importers/proto/heap_graph_tracker_unittest.cc",
     "importers/proto/heap_graph_walker_unittest.cc",
     "importers/proto/proto_trace_parser_unittest.cc",
     "importers/systrace/systrace_parser_unittest.cc",
diff --git a/src/trace_processor/db/typed_column.h b/src/trace_processor/db/typed_column.h
index 9a3dd5e..5be1dcc 100644
--- a/src/trace_processor/db/typed_column.h
+++ b/src/trace_processor/db/typed_column.h
@@ -67,6 +67,13 @@
     return Column::IndexOf(NumericToSqlValue(v));
   }
 
+  std::vector<T> ToVectorForTesting() const {
+    std::vector<T> result(row_map().size());
+    for (uint32_t i = 0; i < row_map().size(); ++i)
+      result[i] = (*this)[i];
+    return result;
+  }
+
   // Implements equality between two items of type |T|.
   static bool Equals(T a, T b) {
     // We need to use equal_to here as it could be T == double and because we
@@ -104,6 +111,13 @@
   // Inserts the value at the end of the column.
   void Append(base::Optional<T> v) { mutable_sparse_vector<T>()->Append(v); }
 
+  std::vector<base::Optional<T>> ToVectorForTesting() const {
+    std::vector<base::Optional<T>> result(row_map().size());
+    for (uint32_t i = 0; i < row_map().size(); ++i)
+      result[i] = (*this)[i];
+    return result;
+  }
+
   // Implements equality between two items of type |T|.
   static bool Equals(base::Optional<T> a, base::Optional<T> b) {
     // We need to use equal_to here as it could be T == double and because we
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.cc b/src/trace_processor/importers/proto/heap_graph_tracker.cc
index 3c1001f..cc631c5 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.cc
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.cc
@@ -187,25 +187,27 @@
     }
   }
 
-  auto* mapping_table =
-      context_->storage->mutable_stack_profile_mapping_table();
-
-  tables::StackProfileMappingTable::Row mapping_row{};
-  mapping_row.name = context_->storage->InternString("JAVA");
-  MappingId mapping_id = mapping_table->Insert(mapping_row);
-
-  uint32_t mapping_idx = *mapping_table->id().IndexOf(mapping_id);
-
-  auto paths = sequence_state.walker.FindPathsFromRoot();
-  WriteFlamegraph(sequence_state, paths, mapping_idx);
+  walkers_.emplace(
+      std::make_pair(sequence_state.current_upid, sequence_state.current_ts),
+      std::move(sequence_state.walker));
 
   sequence_state_.erase(seq_id);
 }
 
-void HeapGraphTracker::WriteFlamegraph(
-    const SequenceState& sequence_state,
-    const HeapGraphWalker::PathFromRoot& init_path,
-    uint32_t mapping_row) {
+std::unique_ptr<tables::ExperimentalFlamegraphNodesTable>
+HeapGraphTracker::BuildFlamegraph(const UniquePid current_upid,
+                                  const int64_t current_ts) {
+  auto it = walkers_.find(std::make_pair(current_upid, current_ts));
+  if (it == walkers_.end())
+    return nullptr;
+
+  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> tbl(
+      new tables::ExperimentalFlamegraphNodesTable(
+          context_->storage->mutable_string_pool(), nullptr));
+
+  HeapGraphWalker::PathFromRoot init_path = it->second.FindPathsFromRoot();
+  auto java_mapping = context_->storage->InternString("JAVA");
+
   std::vector<int32_t> node_to_cumulative_size(init_path.nodes.size());
   std::vector<int32_t> node_to_cumulative_count(init_path.nodes.size());
   // i > 0 is to skip the artifical root node.
@@ -218,41 +221,31 @@
     node_to_cumulative_count[node.parent_id] += node_to_cumulative_count[i];
   }
 
-  std::vector<int32_t> node_to_row_id(init_path.nodes.size());
-  node_to_row_id[0] = -1;  // We use parent_id -1 for roots.
+  std::vector<uint32_t> node_to_row_idx(init_path.nodes.size());
   // i = 1 is to skip the artifical root node.
   for (size_t i = 1; i < init_path.nodes.size(); ++i) {
     const HeapGraphWalker::PathFromRoot::Node& node = init_path.nodes[i];
     PERFETTO_CHECK(node.parent_id < i);
-    const int32_t parent_row_id = node_to_row_id[node.parent_id];
+    base::Optional<uint32_t> parent_id;
+    if (node.parent_id != 0)
+      parent_id = node_to_row_idx[node.parent_id];
     const uint32_t depth = node.depth - 1;  // -1 because we do not have the
                                             // artificial root in the database.
 
-    tables::StackProfileFrameTable::Row row{};
-    PERFETTO_CHECK(node.class_name > 0);
-    row.name = StringId::Raw(static_cast<uint32_t>(node.class_name));
-    row.mapping = mapping_row;
-
-    auto id =
-        context_->storage->mutable_stack_profile_frame_table()->Insert(row);
-    int32_t frame_id = static_cast<int32_t>(id.value);
-
-    auto* callsites = context_->storage->mutable_stack_profile_callsite_table();
-    auto callsite_id = callsites->Insert({depth, parent_row_id, frame_id});
-    int32_t row_id = static_cast<int32_t>(callsite_id.value);
-    node_to_row_id[i] = row_id;
-
     tables::ExperimentalFlamegraphNodesTable::Row alloc_row{
-        sequence_state.current_ts,
-        sequence_state.current_upid,
-        row_id,
+        current_ts,
+        current_upid,
+        depth,
+        StringId::Raw(static_cast<uint32_t>(node.class_name)),
+        java_mapping,
         static_cast<int64_t>(node.count),
         static_cast<int64_t>(node_to_cumulative_count[i]),
         static_cast<int64_t>(node.size),
-        static_cast<int64_t>(node_to_cumulative_size[i])};
-    context_->storage->mutable_experimental_flamegraph_nodes_table()->Insert(
-        alloc_row);
+        static_cast<int64_t>(node_to_cumulative_size[i]),
+        parent_id};
+    node_to_row_idx[i] = *tbl->id().IndexOf(tbl->Insert(alloc_row));
   }
+  return tbl;
 }
 
 void HeapGraphTracker::MarkReachable(int64_t row) {
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker.h b/src/trace_processor/importers/proto/heap_graph_tracker.h
index 71bbb8f..3558410 100644
--- a/src/trace_processor/importers/proto/heap_graph_tracker.h
+++ b/src/trace_processor/importers/proto/heap_graph_tracker.h
@@ -86,6 +86,10 @@
     return &it->second;
   }
 
+  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> BuildFlamegraph(
+      const UniquePid current_upid,
+      const int64_t current_ts);
+
  private:
   struct SequenceState {
     SequenceState(HeapGraphTracker* tracker) : walker(tracker) {}
@@ -104,12 +108,10 @@
   SequenceState& GetOrCreateSequence(uint32_t seq_id);
   bool SetPidAndTimestamp(SequenceState* seq, UniquePid upid, int64_t ts);
 
-  void WriteFlamegraph(const SequenceState& sequence_state,
-                       const HeapGraphWalker::PathFromRoot& path,
-                       uint32_t mapping_row);
 
   TraceProcessorContext* const context_;
   std::map<uint32_t, SequenceState> sequence_state_;
+  std::map<std::pair<UniquePid, int64_t /* ts */>, HeapGraphWalker> walkers_;
 
   std::map<StringPool::Id, std::vector<int64_t>> class_to_rows_;
   std::map<StringPool::Id, std::vector<int64_t>> field_to_rows_;
diff --git a/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
new file mode 100644
index 0000000..3cb5c2a
--- /dev/null
+++ b/src/trace_processor/importers/proto/heap_graph_tracker_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/importers/proto/heap_graph_tracker.h"
+
+#include "perfetto/base/logging.h"
+#include "test/gtest_and_gmock.h"
+
+namespace perfetto {
+namespace trace_processor {
+namespace {
+
+using ::testing::UnorderedElementsAre;
+
+TEST(HeapGraphTrackerTest, BuildFlamegraph) {
+  //           4@A 5@B
+  //             \ /
+  //         2@Y 3@Y
+  //           \ /
+  //           1@X
+
+  constexpr uint64_t kSeqId = 1;
+  constexpr UniquePid kPid = 1;
+  constexpr int64_t kTimestamp = 1;
+
+  TraceProcessorContext context;
+  context.storage.reset(new TraceStorage());
+
+  HeapGraphTracker tracker(&context);
+
+  constexpr uint64_t kField = 1;
+
+  constexpr uint64_t kX = 1;
+  constexpr uint64_t kY = 2;
+  constexpr uint64_t kA = 3;
+  constexpr uint64_t kB = 4;
+
+  StringPool::Id field = context.storage->InternString("foo");
+  StringPool::Id x = context.storage->InternString("X");
+  StringPool::Id y = context.storage->InternString("Y");
+  StringPool::Id a = context.storage->InternString("A");
+  StringPool::Id b = context.storage->InternString("B");
+
+  tracker.AddInternedFieldName(kSeqId, kField, field);
+
+  tracker.AddInternedTypeName(kSeqId, kX, x);
+  tracker.AddInternedTypeName(kSeqId, kY, y);
+  tracker.AddInternedTypeName(kSeqId, kA, a);
+  tracker.AddInternedTypeName(kSeqId, kB, b);
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 1;
+    obj.self_size = 1;
+    obj.type_id = kX;
+    HeapGraphTracker::SourceObject::Reference ref;
+    ref.field_name_id = kField;
+    ref.owned_object_id = 2;
+    obj.references.emplace_back(std::move(ref));
+
+    ref.field_name_id = kField;
+    ref.owned_object_id = 3;
+    obj.references.emplace_back(std::move(ref));
+
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 2;
+    obj.self_size = 2;
+    obj.type_id = kY;
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 3;
+    obj.self_size = 3;
+    obj.type_id = kY;
+    HeapGraphTracker::SourceObject::Reference ref;
+    ref.field_name_id = kField;
+    ref.owned_object_id = 4;
+    obj.references.emplace_back(std::move(ref));
+
+    ref.field_name_id = kField;
+    ref.owned_object_id = 5;
+    obj.references.emplace_back(std::move(ref));
+
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 4;
+    obj.self_size = 4;
+    obj.type_id = kA;
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  {
+    HeapGraphTracker::SourceObject obj;
+    obj.object_id = 5;
+    obj.self_size = 5;
+    obj.type_id = kB;
+    tracker.AddObject(kSeqId, kPid, kTimestamp, std::move(obj));
+  }
+
+  HeapGraphTracker::SourceRoot root;
+  root.root_type = context.storage->InternString("ROOT");
+  root.object_ids.emplace_back(1);
+  tracker.AddRoot(kSeqId, kPid, kTimestamp, root);
+
+  tracker.FinalizeProfile(kSeqId);
+  std::unique_ptr<tables::ExperimentalFlamegraphNodesTable> flame =
+      tracker.BuildFlamegraph(kPid, kTimestamp);
+  ASSERT_NE(flame, nullptr);
+
+  auto cumulative_sizes = flame->cumulative_size().ToVectorForTesting();
+  EXPECT_THAT(cumulative_sizes, UnorderedElementsAre(15, 4, 14, 5));
+
+  auto cumulative_counts = flame->cumulative_count().ToVectorForTesting();
+  EXPECT_THAT(cumulative_counts, UnorderedElementsAre(5, 4, 1, 1));
+
+  auto sizes = flame->size().ToVectorForTesting();
+  EXPECT_THAT(sizes, UnorderedElementsAre(1, 5, 4, 5));
+
+  auto counts = flame->count().ToVectorForTesting();
+  EXPECT_THAT(counts, UnorderedElementsAre(1, 2, 1, 1));
+}
+
+}  // namespace
+}  // namespace trace_processor
+}  // namespace perfetto
diff --git a/src/trace_processor/tables/profiler_tables.h b/src/trace_processor/tables/profiler_tables.h
index 7cbf445..89f8cc5 100644
--- a/src/trace_processor/tables/profiler_tables.h
+++ b/src/trace_processor/tables/profiler_tables.h
@@ -87,18 +87,21 @@
 
 // This will eventually go away, when we also pre-compute the cumulative
 // sizes for native heap profiles.
-#define PERFETTO_TP_HEAP_GRAPH_ALLOCATION_DEF(NAME, PARENT, C)            \
+#define PERFETTO_TP_EXPERIMENTAL_FLAMEGRAPH_NODES(NAME, PARENT, C)        \
   NAME(ExperimentalFlamegraphNodesTable, "experimental_flamegraph_nodes") \
   PERFETTO_TP_ROOT_TABLE(PARENT, C)                                       \
   C(int64_t, ts, Column::Flag::kSorted)                                   \
   C(uint32_t, upid)                                                       \
-  C(int64_t, callsite_id)                                                 \
+  C(uint32_t, depth)                                                      \
+  C(StringPool::Id, name)                                                 \
+  C(StringPool::Id, map_name)                                             \
   C(int64_t, count)                                                       \
   C(int64_t, cumulative_count)                                            \
   C(int64_t, size)                                                        \
-  C(int64_t, cumulative_size)
+  C(int64_t, cumulative_size)                                             \
+  C(base::Optional<uint32_t>, parent_id)
 
-PERFETTO_TP_TABLE(PERFETTO_TP_HEAP_GRAPH_ALLOCATION_DEF);
+PERFETTO_TP_TABLE(PERFETTO_TP_EXPERIMENTAL_FLAMEGRAPH_NODES);
 
 #define PERFETTO_TP_HEAP_GRAPH_OBJECT_DEF(NAME, PARENT, C)  \
   NAME(HeapGraphObjectTable, "heap_graph_object")           \
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index d06580d..6430f72 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -448,9 +448,6 @@
       *db_, &storage->heap_profile_allocation_table(),
       storage->heap_profile_allocation_table().table_name());
   DbSqliteTable::RegisterTable(
-      *db_, &storage->experimental_flamegraph_nodes_table(),
-      storage->experimental_flamegraph_nodes_table().table_name());
-  DbSqliteTable::RegisterTable(
       *db_, &storage->cpu_profile_stack_sample_table(),
       storage->cpu_profile_stack_sample_table().table_name());
   DbSqliteTable::RegisterTable(
diff --git a/src/trace_processor/trace_storage.h b/src/trace_processor/trace_storage.h
index 8b0634b..2970d97 100644
--- a/src/trace_processor/trace_storage.h
+++ b/src/trace_processor/trace_storage.h
@@ -558,15 +558,6 @@
     return &heap_profile_allocation_table_;
   }
 
-  const tables::ExperimentalFlamegraphNodesTable&
-  experimental_flamegraph_nodes_table() const {
-    return experimental_flamegraph_nodes_table_;
-  }
-  tables::ExperimentalFlamegraphNodesTable*
-  mutable_experimental_flamegraph_nodes_table() {
-    return &experimental_flamegraph_nodes_table_;
-  }
-
   const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table()
       const {
     return cpu_profile_stack_sample_table_;
@@ -611,6 +602,7 @@
   }
 
   const StringPool& string_pool() const { return string_pool_; }
+  StringPool* mutable_string_pool() { return &string_pool_; }
 
   // Number of interned strings in the pool. Includes the empty string w/ ID=0.
   size_t string_count() const { return string_pool_.size(); }
@@ -794,8 +786,6 @@
                                                                   nullptr};
   tables::HeapProfileAllocationTable heap_profile_allocation_table_{
       &string_pool_, nullptr};
-  tables::ExperimentalFlamegraphNodesTable experimental_flamegraph_nodes_table_{
-      &string_pool_, nullptr};
   tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
       &string_pool_, nullptr};