Add the ancestor_slice dynamic table.
Given a slice ID, this table finds all of that slice's ancestors by
following the chain of |parent_id| values.
This is useful to allow queries to quickly find the top-level ancestor of a
given slice (imagine joining ancestor_slice(id) ON depth = 0).
Bug: 160148605
Change-Id: I4a60b03a2f3fb8d6d2741fcc5b4c2217d70eb3fe
diff --git a/Android.bp b/Android.bp
index a0f9563..eb51eaa 100644
--- a/Android.bp
+++ b/Android.bp
@@ -6712,6 +6712,7 @@
filegroup {
name: "perfetto_src_trace_processor_lib",
srcs: [
+ "src/trace_processor/dynamic/ancestor_slice_generator.cc",
"src/trace_processor/dynamic/describe_slice_generator.cc",
"src/trace_processor/dynamic/experimental_counter_dur_generator.cc",
"src/trace_processor/dynamic/experimental_flamegraph_generator.cc",
diff --git a/BUILD b/BUILD
index 8cc0e41..739956d 100644
--- a/BUILD
+++ b/BUILD
@@ -936,6 +936,8 @@
filegroup(
name = "src_trace_processor_lib",
srcs = [
+ "src/trace_processor/dynamic/ancestor_slice_generator.cc",
+ "src/trace_processor/dynamic/ancestor_slice_generator.h",
"src/trace_processor/dynamic/describe_slice_generator.cc",
"src/trace_processor/dynamic/describe_slice_generator.h",
"src/trace_processor/dynamic/experimental_counter_dur_generator.cc",
diff --git a/docs/analysis/trace-processor.md b/docs/analysis/trace-processor.md
index 998e678..7fc102b 100644
--- a/docs/analysis/trace-processor.md
+++ b/docs/analysis/trace-processor.md
@@ -294,6 +294,31 @@
reasons, span join does attempt to dectect and error out in this situation;
instead, incorrect rows will silently be produced.
+### Ancestor slice
+ancestor_slice is a custom operator table that takes a
+[slice table's id column](/docs/analysis/sql-tables#slice) and computes all
+slices on the same track that are ancestors of that id (i.e. given a
+slice id it will return as rows all slices that can be found by following the
+parent_id column up to the root slice (depth = 0)).
+
+The returned format is the same as the
+[slice table](/docs/analysis/sql-tables#slice)
+
+For example, the following finds the top level slice given a bunch of slices of
+interest.
+
+```sql
+CREATE VIEW interesting_slices AS
+SELECT id, ts, dur, track_id
+FROM slice WHERE name LIKE "%interesting slice name%";
+
+SELECT
+ *
+FROM
+ interesting_slices LEFT JOIN
+ ancestor_slice(interesting_slices.id) AS ancestor ON ancestor.depth = 0
+```
+
## Metrics
TIP: To see how to add to add a new metric to trace processor, see the checklist
diff --git a/src/trace_processor/BUILD.gn b/src/trace_processor/BUILD.gn
index 02925da..8ebb933 100644
--- a/src/trace_processor/BUILD.gn
+++ b/src/trace_processor/BUILD.gn
@@ -278,6 +278,8 @@
if (enable_perfetto_trace_processor_sqlite) {
source_set("lib") {
sources = [
+ "dynamic/ancestor_slice_generator.cc",
+ "dynamic/ancestor_slice_generator.h",
"dynamic/describe_slice_generator.cc",
"dynamic/describe_slice_generator.h",
"dynamic/experimental_counter_dur_generator.cc",
diff --git a/src/trace_processor/dynamic/ancestor_slice_generator.cc b/src/trace_processor/dynamic/ancestor_slice_generator.cc
new file mode 100644
index 0000000..ff17f91
--- /dev/null
+++ b/src/trace_processor/dynamic/ancestor_slice_generator.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/dynamic/ancestor_slice_generator.h"
+
+#include <memory>
+#include <set>
+
+#include "src/trace_processor/types/trace_processor_context.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+AncestorSliceGenerator::AncestorSliceGenerator(TraceProcessorContext* context)
+ : context_(context) {}
+
+AncestorSliceGenerator::~AncestorSliceGenerator() = default;
+
+util::Status AncestorSliceGenerator::ValidateConstraints(
+ const QueryConstraints& qc) {
+ const auto& cs = qc.constraints();
+
+ auto slice_id_fn = [this](const QueryConstraints::Constraint& c) {
+ return c.column == static_cast<int>(
+ context_->storage->slice_table().GetColumnCount()) &&
+ c.op == SQLITE_INDEX_CONSTRAINT_EQ;
+ };
+ bool has_slice_id_cs =
+ std::find_if(cs.begin(), cs.end(), slice_id_fn) != cs.end();
+
+ return has_slice_id_cs
+ ? util::OkStatus()
+ : util::ErrStatus("Failed to find required constraints");
+}
+
+std::unique_ptr<Table> AncestorSliceGenerator::ComputeTable(
+ const std::vector<Constraint>& cs,
+ const std::vector<Order>&) {
+ using S = tables::SliceTable;
+
+ auto it = std::find_if(cs.begin(), cs.end(), [this](const Constraint& c) {
+ return c.col_idx == context_->storage->slice_table().GetColumnCount() &&
+ c.op == FilterOp::kEq;
+ });
+ PERFETTO_DCHECK(it != cs.end());
+
+ const auto& slice = context_->storage->slice_table();
+ uint32_t child_id = static_cast<uint32_t>(it->value.AsLong());
+ auto start_row = slice.id().IndexOf(S::Id(child_id));
+
+ if (!start_row) {
+ // TODO(lalitm): Ideally this should result in an error, or be filtered out
+ // during ValidateConstraints so we can just dereference |start_row|
+ // directly. However ValidateConstraints doesn't know the value we're
+ // filtering for so can't ensure it exists. For now we return a nullptr
+ // which will cause the query to surface an error with the message "SQL
+ // error: constraint failed".
+ return nullptr;
+ }
+
+ // Build up all the parents row ids, and a new column that includes the
+ // constraint.
+ std::vector<uint32_t> ids;
+ std::unique_ptr<NullableVector<uint32_t>> child_ids(
+ new NullableVector<uint32_t>());
+
+ auto maybe_parent_id = slice.parent_id()[*start_row];
+ while (maybe_parent_id) {
+ ids.push_back(maybe_parent_id.value().value);
+ child_ids->Append(child_id);
+ // Update the loop variable by looking up the next parent_id.
+ maybe_parent_id = slice.parent_id()[*slice.id().IndexOf(*maybe_parent_id)];
+ }
+ return std::unique_ptr<Table>(
+ new Table(slice.Apply(RowMap(std::move(ids)))
+ .ExtendWithColumn("start_id", std::move(child_ids),
+ TypedColumn<uint32_t>::default_flags() |
+ TypedColumn<uint32_t>::kHidden)));
+}
+
+Table::Schema AncestorSliceGenerator::CreateSchema() {
+ auto schema = tables::SliceTable::Schema();
+ schema.columns.push_back(Table::Schema::Column{
+ "start_id", SqlValue::Type::kLong, /* is_id = */ false,
+ /* is_sorted = */ false, /* is_hidden = */ true});
+ return schema;
+}
+
+std::string AncestorSliceGenerator::TableName() {
+ return "ancestor_slice";
+}
+
+uint32_t AncestorSliceGenerator::EstimateRowCount() {
+ return 1;
+}
+} // namespace trace_processor
+} // namespace perfetto
diff --git a/src/trace_processor/dynamic/ancestor_slice_generator.h b/src/trace_processor/dynamic/ancestor_slice_generator.h
new file mode 100644
index 0000000..265172e
--- /dev/null
+++ b/src/trace_processor/dynamic/ancestor_slice_generator.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_DYNAMIC_ANCESTOR_SLICE_GENERATOR_H_
+#define SRC_TRACE_PROCESSOR_DYNAMIC_ANCESTOR_SLICE_GENERATOR_H_
+
+#include "src/trace_processor/sqlite/db_sqlite_table.h"
+
+#include "src/trace_processor/storage/trace_storage.h"
+
+namespace perfetto {
+namespace trace_processor {
+
+class TraceProcessorContext;
+
+// Dynamic table implementing the ancestor_slice table.
+// See /docs/analysis/trace-processor.md for details about the functionality
+// and usage of this table.
+class AncestorSliceGenerator : public DbSqliteTable::DynamicTableGenerator {
+ public:
+ explicit AncestorSliceGenerator(TraceProcessorContext* context);
+ ~AncestorSliceGenerator() override;
+
+ Table::Schema CreateSchema() override;
+ std::string TableName() override;
+ uint32_t EstimateRowCount() override;
+ util::Status ValidateConstraints(const QueryConstraints&) override;
+ std::unique_ptr<Table> ComputeTable(const std::vector<Constraint>& cs,
+ const std::vector<Order>& ob) override;
+
+ private:
+ TraceProcessorContext* context_ = nullptr;
+};
+
+} // namespace trace_processor
+} // namespace perfetto
+
+#endif // SRC_TRACE_PROCESSOR_DYNAMIC_ANCESTOR_SLICE_GENERATOR_H_
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index 7ff4f18..736c806 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -23,6 +23,7 @@
#include "perfetto/base/time.h"
#include "perfetto/ext/base/string_splitter.h"
#include "perfetto/ext/base/string_utils.h"
+#include "src/trace_processor/dynamic/ancestor_slice_generator.h"
#include "src/trace_processor/dynamic/describe_slice_generator.h"
#include "src/trace_processor/dynamic/experimental_counter_dur_generator.h"
#include "src/trace_processor/dynamic/experimental_flamegraph_generator.h"
@@ -598,6 +599,8 @@
new ExperimentalSliceLayoutGenerator(
context_.storage.get()->mutable_string_pool(),
&storage->slice_table())));
+ RegisterDynamicTable(std::unique_ptr<AncestorSliceGenerator>(
+ new AncestorSliceGenerator(&context_)));
// New style db-backed tables.
RegisterDbTable(storage->arg_table());
diff --git a/test/trace_processor/ancestor_slice.out b/test/trace_processor/ancestor_slice.out
new file mode 100644
index 0000000..fb01164
--- /dev/null
+++ b/test/trace_processor/ancestor_slice.out
@@ -0,0 +1,15 @@
+"currentSliceName","ancestorSliceName"
+"event1_on_async_no_relationships","[NULL]"
+"event1_on_t1_no_relationships","[NULL]"
+"event1_on_t2_no_relationships","[NULL]"
+"event2_depth_0_on_t1","[NULL]"
+"event2_on_async_depth_0","[NULL]"
+"event2_depth_1_on_t1","event2_depth_0_on_t1"
+"event2_on_async_depth_1","event2_on_async_depth_0"
+"event2_depth_0_on_t2","[NULL]"
+"event2_first_depth_1_on_t2","event2_depth_0_on_t2"
+"event2_first_depth_2_on_t2","event2_depth_0_on_t2"
+"event2_first_depth_2_on_t2","event2_first_depth_1_on_t2"
+"event2_second_depth_1_on_t2","event2_depth_0_on_t2"
+"event2_second_depth_2_on_t2","event2_depth_0_on_t2"
+"event2_second_depth_2_on_t2","event2_second_depth_1_on_t2"
diff --git a/test/trace_processor/ancestor_slice.sql b/test/trace_processor/ancestor_slice.sql
new file mode 100644
index 0000000..44ff972
--- /dev/null
+++ b/test/trace_processor/ancestor_slice.sql
@@ -0,0 +1,3 @@
+SELECT slice.name AS currentSliceName, ancestor.name AS ancestorSliceName
+FROM slice LEFT JOIN ancestor_slice(slice.id) AS ancestor
+ORDER BY slice.ts ASC, ancestor.ts ASC, slice.name ASC, ancestor.name ASC;
diff --git a/test/trace_processor/index b/test/trace_processor/index
index 95703e8..2d1de52 100644
--- a/test/trace_processor/index
+++ b/test/trace_processor/index
@@ -106,6 +106,8 @@
# Window table
../data/android_sched_and_ps.pb smoke_window.sql android_sched_and_ps_smoke_window.out
+# Ancestor slice table.
+relationship_tables.textproto ancestor_slice.sql ancestor_slice.out
# The below tests check the lower level layers of the trace processor (i.e.
# fitering and printing code).
diff --git a/test/trace_processor/relationship_tables.textproto b/test/trace_processor/relationship_tables.textproto
new file mode 100644
index 0000000..084ce99
--- /dev/null
+++ b/test/trace_processor/relationship_tables.textproto
@@ -0,0 +1,308 @@
+# Sequence 1 defaults to track for "t1".
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 0
+ incremental_state_cleared: true
+ track_descriptor {
+ uuid: 1
+ parent_uuid: 10
+ thread {
+ pid: 5
+ tid: 1
+ thread_name: "t1"
+ }
+ }
+ trace_packet_defaults {
+ track_event_defaults {
+ track_uuid: 1
+ }
+ }
+}
+# Sequence 2 defaults to track for "t2".
+packet {
+ trusted_packet_sequence_id: 2
+ timestamp: 0
+ incremental_state_cleared: true
+ track_descriptor {
+ uuid: 2
+ parent_uuid: 10
+ thread {
+ pid: 5
+ tid: 2
+ thread_name: "t2"
+ }
+ }
+ trace_packet_defaults {
+ track_event_defaults {
+ track_uuid: 2
+ }
+ }
+}
+# Both thread tracks are nested underneath this process track.
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 0
+ track_descriptor {
+ uuid: 10
+ process {
+ pid: 5
+ process_name: "p1"
+ }
+ }
+}
+# And we have an async track underneath the process too.
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 0
+ track_descriptor {
+ uuid: 11
+ parent_uuid: 10
+ name: "async"
+ }
+}
+
+# ----------------------
+# Slices
+# ----------------------
+
+# First we create an event with no relationships just a single event on a track
+# by it self. For both threads and async track.
+#
+# t1 |----------|
+# ---------------------
+# t2 |-----|
+# --------------------
+# async |-------------|
+
+# Should appear on default track "t1".
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 1000
+ track_event {
+ categories: "cat"
+ name: "event1_on_t1_no_relationships"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 4000
+ track_event {
+ categories: "cat"
+ name: "event1_on_t1_no_relationships"
+ type: 2
+ }
+}
+# Should appear on default track "t2".
+packet {
+ trusted_packet_sequence_id: 2
+ timestamp: 2000
+ track_event {
+ categories: "cat"
+ name: "event1_on_t2_no_relationships"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 2
+ timestamp: 3000
+ track_event {
+ categories: "cat"
+ name: "event1_on_t2_no_relationships"
+ type: 2
+ }
+}
+# Should appear on async track.
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 1000
+ track_event {
+ track_uuid: 11
+ categories: "cat"
+ name: "event1_on_async_no_relationships"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 5000
+ track_event {
+ track_uuid: 11
+ categories: "cat"
+ name: "event1_on_async_no_relationships"
+ type: 2
+ }
+}
+
+# Next we create stacks of various depths for the different tracks.
+# t1 |--------|
+# |---|
+# ---------------------------------
+# t2 |-------------------|
+# |---| |-----|
+# | |-|
+# ---------------------------------
+# async |----------------------|
+# |-----------|
+
+# Should appear on default track "t1".
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 6000
+ track_event {
+ categories: "cat"
+ name: "event2_depth_0_on_t1"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 7000
+ track_event {
+ categories: "cat"
+ name: "event2_depth_1_on_t1"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 8000
+ track_event {
+ categories: "cat"
+ name: "event2_depth_1_on_t1"
+ type: 2
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 9000
+ track_event {
+ categories: "cat"
+ name: "event2_depth_0_on_t1"
+ type: 2
+ }
+}
+
+# Should appear on default track "t2".
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 10000
+ track_event {
+ categories: "cat"
+ name: "event2_depth_0_on_t2"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 11000
+ track_event {
+ categories: "cat"
+ name: "event2_first_depth_1_on_t2"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 12000
+ track_event {
+ categories: "cat"
+ name: "event2_first_depth_2_on_t2"
+ type: 3
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 13000
+ track_event {
+ categories: "cat"
+ name: "event2_first_depth_1_on_t2"
+ type: 2
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 15000
+ track_event {
+ categories: "cat"
+ name: "event2_second_depth_1_on_t2"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 16000
+ track_event {
+ categories: "cat"
+ name: "event2_second_depth_2_on_t2"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 17000
+ track_event {
+ categories: "cat"
+ name: "event2_second_depth_2_on_t2"
+ type: 2
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 18000
+ track_event {
+ categories: "cat"
+ name: "event2_second_depth_1_on_t2"
+ type: 2
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 19000
+ track_event {
+ categories: "cat"
+ name: "event2_depth_0_on_t2"
+ type: 2
+ }
+}
+# Should appear on async track.
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 6000
+ track_event {
+ track_uuid: 11
+ categories: "cat"
+ name: "event2_on_async_depth_0"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 9000
+ track_event {
+ track_uuid: 11
+ categories: "cat"
+ name: "event2_on_async_depth_1"
+ type: 1
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 13000
+ track_event {
+ track_uuid: 11
+ categories: "cat"
+    name: "event2_on_async_depth_1"
+ type: 2
+ }
+}
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 14000
+ track_event {
+ track_uuid: 11
+ categories: "cat"
+ name: "event2_on_async_depth_0"
+ type: 2
+ }
+}