Merge "ftrace: fix pointer stability of events_ vector"
diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000..b7aebb1
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,4 @@
+Checks: android-cloexec-*,bugprone-*,google-explicit-constructor
+CheckOptions:
+  - key:             bugprone-assert-side-effect.AssertMacros
+    value:           'PERFETTO_DCHECK'
diff --git a/include/perfetto/ext/base/string_utils.h b/include/perfetto/ext/base/string_utils.h
index df2de4b..12fe495 100644
--- a/include/perfetto/ext/base/string_utils.h
+++ b/include/perfetto/ext/base/string_utils.h
@@ -112,6 +112,9 @@
   return ToHex(s.c_str(), s.size());
 }
 std::string IntToHexString(uint32_t number);
+std::string ReplaceAll(std::string str,
+                       const std::string& to_replace,
+                       const std::string& replacement);
 
 }  // namespace base
 }  // namespace perfetto
diff --git a/protos/perfetto/trace/android/BUILD.gn b/protos/perfetto/trace/android/BUILD.gn
index 99d7f0f..2bece63 100644
--- a/protos/perfetto/trace/android/BUILD.gn
+++ b/protos/perfetto/trace/android/BUILD.gn
@@ -19,8 +19,8 @@
 
   sources = [
     "android_log.proto",
-    "graphics_frame_event.proto",
     "gpu_mem_event.proto",
+    "graphics_frame_event.proto",
     "initial_display_state.proto",
     "packages_list.proto",
   ]
diff --git a/src/base/string_utils.cc b/src/base/string_utils.cc
index f1ee35e..dfd6716 100644
--- a/src/base/string_utils.cc
+++ b/src/base/string_utils.cc
@@ -149,5 +149,17 @@
   return res;
 }
 
+std::string ReplaceAll(std::string str,
+                       const std::string& to_replace,
+                       const std::string& replacement) {
+  PERFETTO_CHECK(!to_replace.empty());
+  size_t pos = 0;
+  while ((pos = str.find(to_replace, pos)) != std::string::npos) {
+    str.replace(pos, to_replace.length(), replacement);
+    pos += replacement.length();
+  }
+  return str;
+}
+
 }  // namespace base
 }  // namespace perfetto
diff --git a/src/base/string_utils_unittest.cc b/src/base/string_utils_unittest.cc
index f41c470..6fba1fd 100644
--- a/src/base/string_utils_unittest.cc
+++ b/src/base/string_utils_unittest.cc
@@ -248,6 +248,19 @@
   EXPECT_EQ(Find("abc", ""), std::string::npos);
 }
 
+TEST(StringUtilsTest, ReplaceAll) {
+  EXPECT_EQ(ReplaceAll("", "a", ""), "");
+  EXPECT_EQ(ReplaceAll("", "a", "b"), "");
+  EXPECT_EQ(ReplaceAll("a", "a", "b"), "b");
+  EXPECT_EQ(ReplaceAll("aaaa", "a", "b"), "bbbb");
+  EXPECT_EQ(ReplaceAll("aaaa", "aa", "b"), "bb");
+  EXPECT_EQ(ReplaceAll("aa", "aa", "bb"), "bb");
+  EXPECT_EQ(ReplaceAll("aa", "a", "bb"), "bbbb");
+  EXPECT_EQ(ReplaceAll("abc", "a", "b"), "bbc");
+  EXPECT_EQ(ReplaceAll("abc", "c", "b"), "abb");
+  EXPECT_EQ(ReplaceAll("abc", "c", "bbb"), "abbbb");
+}
+
 }  // namespace
 }  // namespace base
 }  // namespace perfetto
diff --git a/src/profiling/memory/client_ext.cc b/src/profiling/memory/client_ext.cc
index ce6cfbf..e724027 100644
--- a/src/profiling/memory/client_ext.cc
+++ b/src/profiling/memory/client_ext.cc
@@ -411,8 +411,8 @@
     if (sampled_alloc_sz == 0)  // not sampling
       return false;
 
-    client = *g_client_ptr;   // owning copy
-  }                           // unlock
+    client = *g_client_ptr;  // owning copy
+  }                          // unlock
 
   if (!client->RecordMalloc(heap.service_heap_id, sampled_alloc_sz, size, id)) {
     ShutdownLazy();
diff --git a/src/profiling/memory/heapprofd_producer.cc b/src/profiling/memory/heapprofd_producer.cc
index da4b3ea..8669388 100644
--- a/src/profiling/memory/heapprofd_producer.cc
+++ b/src/profiling/memory/heapprofd_producer.cc
@@ -163,7 +163,7 @@
       socket_delegate_(this),
       weak_factory_(this) {
   CheckDataSourceMemory();  // Kick off guardrail task.
-  stat_fd_.reset(open("/proc/self/stat", O_RDONLY));
+  stat_fd_.reset(open("/proc/self/stat", O_RDONLY | O_CLOEXEC));
   if (!stat_fd_) {
     PERFETTO_ELOG(
         "Failed to open /proc/self/stat. Cannot accept profiles "
diff --git a/src/profiling/memory/parse_smaps_unittest.cc b/src/profiling/memory/parse_smaps_unittest.cc
index 9ee4c82..5708a2c 100644
--- a/src/profiling/memory/parse_smaps_unittest.cc
+++ b/src/profiling/memory/parse_smaps_unittest.cc
@@ -38,7 +38,7 @@
 TEST(ParseSmapsTest, Smoke) {
   base::ScopedFstream fd(fopen(
       base::GetTestDataPath("src/profiling/memory/test/data/cat_smaps").c_str(),
-      "r"));
+      "re"));
   std::vector<SmapsEntry> entries;
   EXPECT_TRUE(ParseSmaps(
       *fd, [&entries](const SmapsEntry& e) { entries.emplace_back(e); }));
@@ -65,7 +65,7 @@
   base::ScopedFstream fd(fopen(
       base::GetTestDataPath("src/profiling/memory/test/data/cat_smaps_noeol")
           .c_str(),
-      "r"));
+      "re"));
   std::vector<SmapsEntry> entries;
   EXPECT_TRUE(ParseSmaps(
       *fd, [&entries](const SmapsEntry& e) { entries.emplace_back(e); }));
diff --git a/src/trace_processor/importers/proto/heap_profile_tracker.cc b/src/trace_processor/importers/proto/heap_profile_tracker.cc
index 14d99be..602a6bd 100644
--- a/src/trace_processor/importers/proto/heap_profile_tracker.cc
+++ b/src/trace_processor/importers/proto/heap_profile_tracker.cc
@@ -267,12 +267,18 @@
       static_cast<uint32_t>(alloc.pid));
 
   tables::HeapProfileAllocationTable::Row alloc_row{
-      alloc.timestamp, upid, callstack_id,
+      alloc.timestamp,
+      upid,
+      alloc.heap_name,
+      callstack_id,
       static_cast<int64_t>(alloc.alloc_count),
       static_cast<int64_t>(alloc.self_allocated)};
 
   tables::HeapProfileAllocationTable::Row free_row{
-      alloc.timestamp, upid, callstack_id,
+      alloc.timestamp,
+      upid,
+      alloc.heap_name,
+      callstack_id,
       -static_cast<int64_t>(alloc.free_count),
       -static_cast<int64_t>(alloc.self_freed)};
 
@@ -295,7 +301,8 @@
   tables::HeapProfileAllocationTable::Row& prev_free = prev_free_it->second;
 
   std::set<CallsiteId>& callstacks_for_source_callstack_id =
-      sequence_state.seen_callstacks[std::make_pair(upid, alloc.callstack_id)];
+      sequence_state.seen_callstacks[SourceAllocationIndex{
+          upid, alloc.callstack_id, alloc.heap_name}];
   bool new_callstack;
   std::tie(std::ignore, new_callstack) =
       callstacks_for_source_callstack_id.emplace(callstack_id);
diff --git a/src/trace_processor/importers/proto/heap_profile_tracker.h b/src/trace_processor/importers/proto/heap_profile_tracker.h
index 91cfe72..0f6e174 100644
--- a/src/trace_processor/importers/proto/heap_profile_tracker.h
+++ b/src/trace_processor/importers/proto/heap_profile_tracker.h
@@ -39,6 +39,7 @@
     // This is int64_t, because we get this from the TraceSorter which also
     // converts this for us.
     int64_t timestamp = 0;
+    StringPool::Id heap_name;
     StackProfileTracker::SourceCallstackId callstack_id = 0;
     uint64_t self_allocated = 0;
     uint64_t self_freed = 0;
@@ -75,7 +76,15 @@
       StackProfileTracker* stack_profile_tracker,
       const SourceAllocation&,
       const StackProfileTracker::InternLookup* intern_lookup = nullptr);
-
+  struct SourceAllocationIndex {
+    UniquePid upid;
+    StackProfileTracker::SourceCallstackId src_callstack_id;
+    StringPool::Id heap_name;
+    bool operator<(const SourceAllocationIndex& o) const {
+      return std::tie(upid, src_callstack_id, heap_name) <
+             std::tie(o.upid, o.src_callstack_id, o.heap_name);
+    }
+  };
   struct SequenceState {
     std::vector<SourceAllocation> pending_allocs;
 
@@ -96,9 +105,7 @@
     // one, and then handle it as normal. If it is the first time we see a
     // SourceCallstackId for a CallsiteId, we put the previous value into
     // the correction maps below.
-    std::map<std::pair<UniquePid, StackProfileTracker::SourceCallstackId>,
-             std::set<CallsiteId>>
-        seen_callstacks;
+    std::map<SourceAllocationIndex, std::set<CallsiteId>> seen_callstacks;
     std::map<StackProfileTracker::SourceCallstackId,
              tables::HeapProfileAllocationTable::Row>
         alloc_correction;
diff --git a/src/trace_processor/importers/proto/proto_trace_parser.cc b/src/trace_processor/importers/proto/proto_trace_parser.cc
index d11afc4..e821046 100644
--- a/src/trace_processor/importers/proto/proto_trace_parser.cc
+++ b/src/trace_processor/importers/proto/proto_trace_parser.cc
@@ -318,6 +318,12 @@
 
       HeapProfileTracker::SourceAllocation src_allocation;
       src_allocation.pid = entry.pid();
+      if (entry.heap_name().size != 0) {
+        src_allocation.heap_name =
+            context_->storage->InternString(entry.heap_name());
+      } else {
+        src_allocation.heap_name = context_->storage->InternString("malloc");
+      }
       src_allocation.timestamp = timestamp;
       src_allocation.callstack_id = sample.callstack_id();
       if (sample.self_max()) {
diff --git a/src/trace_processor/sqlite/span_join_operator_table.cc b/src/trace_processor/sqlite/span_join_operator_table.cc
index de98f94..1357ddf 100644
--- a/src/trace_processor/sqlite/span_join_operator_table.cc
+++ b/src/trace_processor/sqlite/span_join_operator_table.cc
@@ -53,6 +53,24 @@
   return base::nullopt;
 }
 
+inline std::string EscapedSqliteValueAsString(sqlite3_value* value) {
+  switch (sqlite3_value_type(value)) {
+    case SQLITE_INTEGER:
+      return std::to_string(sqlite3_value_int64(value));
+    case SQLITE_FLOAT:
+      return std::to_string(sqlite3_value_double(value));
+    case SQLITE_TEXT: {
+      // If str itself contains a single quote, we need to escape it with
+      // another single quote.
+      const char* str =
+          reinterpret_cast<const char*>(sqlite3_value_text(value));
+      return "'" + base::ReplaceAll(str, "'", "''") + "'";
+    }
+    default:
+      PERFETTO_FATAL("Unknown value type %d", sqlite3_value_type(value));
+  }
+}
+
 }  // namespace
 
 SpanJoinOperatorTable::SpanJoinOperatorTable(sqlite3* db, const TraceStorage*)
@@ -275,7 +293,7 @@
 
     auto op = sqlite_utils::OpToString(
         cs.op == kSourceGeqOpCode ? SQLITE_INDEX_CONSTRAINT_GE : cs.op);
-    auto value = sqlite_utils::SqliteValueAsString(argv[i]);
+    auto value = EscapedSqliteValueAsString(argv[i]);
 
     constraints.emplace_back("`" + col_name + "`" + op + value);
   }
diff --git a/src/trace_processor/sqlite/sqlite_utils.h b/src/trace_processor/sqlite/sqlite_utils.h
index 8e97c03..4896df7 100644
--- a/src/trace_processor/sqlite/sqlite_utils.h
+++ b/src/trace_processor/sqlite/sqlite_utils.h
@@ -380,22 +380,6 @@
   sqlite3_result_double(ctx, value);
 }
 
-inline std::string SqliteValueAsString(sqlite3_value* value) {
-  switch (sqlite3_value_type(value)) {
-    case SQLITE_INTEGER:
-      return std::to_string(sqlite3_value_int64(value));
-    case SQLITE_FLOAT:
-      return std::to_string(sqlite3_value_double(value));
-    case SQLITE_TEXT: {
-      const char* str =
-          reinterpret_cast<const char*>(sqlite3_value_text(value));
-      return "'" + std::string(str) + "'";
-    }
-    default:
-      PERFETTO_FATAL("Unknown value type %d", sqlite3_value_type(value));
-  }
-}
-
 inline std::vector<SqliteTable::Column> GetColumnsForTable(
     sqlite3* db,
     const std::string& raw_table_name) {
diff --git a/src/trace_processor/tables/profiler_tables.h b/src/trace_processor/tables/profiler_tables.h
index 4ce7ed9..3be12fa 100644
--- a/src/trace_processor/tables/profiler_tables.h
+++ b/src/trace_processor/tables/profiler_tables.h
@@ -183,6 +183,7 @@
   PERFETTO_TP_ROOT_TABLE(PARENT, C)                              \
   C(int64_t, ts, Column::Flag::kSorted)                          \
   C(uint32_t, upid)                                              \
+  C(StringPool::Id, heap_name)                                   \
   C(StackProfileCallsiteTable::Id, callsite_id)                  \
   C(int64_t, count)                                              \
   C(int64_t, size)
diff --git a/test/metrics/android_ion.out b/test/metrics/android_ion.out
index c7094fb..5116e55 100644
--- a/test/metrics/android_ion.out
+++ b/test/metrics/android_ion.out
@@ -8,7 +8,7 @@
   }
   buffer {
     name: "system"
-    avg_size_bytes: 1497.48743719
+    avg_size_bytes: 1497.4874371859296
     min_size_bytes: 1000.0
     max_size_bytes: 2000.0
     total_alloc_size_bytes: 2000.0
diff --git a/test/trace_processor/heap_profile_tracker_new_stack.out b/test/trace_processor/heap_profile_tracker_new_stack.out
index 205768d..9306208 100644
--- a/test/trace_processor/heap_profile_tracker_new_stack.out
+++ b/test/trace_processor/heap_profile_tracker_new_stack.out
@@ -1,5 +1,5 @@
-"id","type","ts","upid","callsite_id","count","size"
-0,"heap_profile_allocation",0,0,0,1,1
-1,"heap_profile_allocation",0,0,0,-1,-1
-2,"heap_profile_allocation",1,0,0,1,1
-3,"heap_profile_allocation",1,0,0,-1,-1
+"id","type","ts","upid","heap_name","callsite_id","count","size"
+0,"heap_profile_allocation",0,0,"malloc",0,1,1
+1,"heap_profile_allocation",0,0,"malloc",0,-1,-1
+2,"heap_profile_allocation",1,0,"malloc",0,1,1
+3,"heap_profile_allocation",1,0,"malloc",0,-1,-1
diff --git a/test/trace_processor/heap_profile_tracker_twoheaps.out b/test/trace_processor/heap_profile_tracker_twoheaps.out
new file mode 100644
index 0000000..cc78466
--- /dev/null
+++ b/test/trace_processor/heap_profile_tracker_twoheaps.out
@@ -0,0 +1,5 @@
+"id","type","ts","upid","heap_name","callsite_id","count","size"
+0,"heap_profile_allocation",0,0,"malloc",0,1,1
+1,"heap_profile_allocation",0,0,"malloc",0,-1,-1
+2,"heap_profile_allocation",0,0,"custom",0,1,1
+3,"heap_profile_allocation",0,0,"custom",0,-1,-1
diff --git a/test/trace_processor/heap_profile_tracker_twoheaps.sql b/test/trace_processor/heap_profile_tracker_twoheaps.sql
new file mode 100644
index 0000000..efed7da
--- /dev/null
+++ b/test/trace_processor/heap_profile_tracker_twoheaps.sql
@@ -0,0 +1 @@
+select * from heap_profile_allocation;
diff --git a/test/trace_processor/heap_profile_tracker_twoheaps.textproto b/test/trace_processor/heap_profile_tracker_twoheaps.textproto
new file mode 100644
index 0000000..b5a4aed
--- /dev/null
+++ b/test/trace_processor/heap_profile_tracker_twoheaps.textproto
@@ -0,0 +1,62 @@
+packet {
+  clock_snapshot {
+    clocks: {
+      clock_id: 6 # BOOTTIME
+      timestamp: 0
+    }
+    clocks: {
+      clock_id: 4 # MONOTONIC_COARSE
+      timestamp: 0
+    }
+  }
+}
+
+packet {
+  previous_packet_dropped: true
+  incremental_state_cleared: true
+  trusted_packet_sequence_id: 1
+  timestamp: 0
+  interned_data {
+    mappings {
+      iid: 1
+    }
+    frames {
+      iid: 1
+      mapping_id: 1
+      rel_pc: 0x123
+    }
+    callstacks {
+      iid: 1
+      frame_ids: 1
+    }
+  }
+}
+
+packet {
+  trusted_packet_sequence_id: 1
+  timestamp: 0
+  profile_packet {
+    index: 0
+    continued: false
+    process_dumps {
+      heap_name: "malloc"
+      samples {
+        callstack_id: 1
+        self_allocated: 1
+        alloc_count: 1
+        self_freed: 1
+        free_count: 1
+      }
+    }
+    process_dumps {
+      heap_name: "custom"
+      samples {
+        callstack_id: 1
+        self_allocated: 1
+        alloc_count: 1
+        self_freed: 1
+        free_count: 1
+      }
+    }
+  }
+}
diff --git a/test/trace_processor/index b/test/trace_processor/index
index e533516..2f2953b 100644
--- a/test/trace_processor/index
+++ b/test/trace_processor/index
@@ -180,6 +180,7 @@
 ../data/system-server-heap-graph.pftrace heap_graph_flamegraph.sql heap_graph_flamegraph_system-server-heap-graph.out
 ../data/system-server-native-profile heap_profile_flamegraph.sql heap_profile_flamegraph_system-server-native-profile.out
 heap_profile_tracker_new_stack.textproto heap_profile_tracker_new_stack.sql heap_profile_tracker_new_stack.out
+heap_profile_tracker_twoheaps.textproto heap_profile_tracker_twoheaps.sql heap_profile_tracker_twoheaps.out
 heap_graph_branching.textproto heap_graph_flamegraph_focused.sql heap_graph_flamegraph_focused.out
 
 stack_profile_tracker_empty_callstack.textproto stack_profile_tracker_empty_callstack.sql stack_profile_tracker_empty_callstack.out
diff --git a/tools/diff_test_trace_processor.py b/tools/diff_test_trace_processor.py
index ea1f5b5..9b98644 100755
--- a/tools/diff_test_trace_processor.py
+++ b/tools/diff_test_trace_processor.py
@@ -13,6 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
 import argparse
 import datetime
 import difflib
@@ -135,7 +139,7 @@
 
   if json_output:
     expected_text = expected
-    actual_text = stdout
+    actual_text = stdout.decode('utf8')
   else:
     # Expected will be in text proto format and we'll need to parse it to a real
     # proto.
@@ -152,7 +156,7 @@
     actual_text = text_format.MessageToString(actual_message)
 
   return TestResult('metric', metric, gen_trace_path, cmd, expected_text,
-                    actual_text, stderr, tp.returncode)
+                    actual_text, stderr.decode('utf8'), tp.returncode)
 
 
 def run_query_test(trace_processor_path, gen_trace_path, query_path,
@@ -171,8 +175,8 @@
 
   tp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   (stdout, stderr) = tp.communicate()
-  return TestResult('query', query_path, gen_trace_path, cmd, expected, stdout,
-                    stderr, tp.returncode)
+  return TestResult('query', query_path, gen_trace_path, cmd, expected,
+                    stdout.decode('utf8'), stderr.decode('utf8'), tp.returncode)
 
 
 def run_all_tests(trace_processor, trace_descriptor_path,
@@ -228,11 +232,11 @@
       else:
         assert False
 
-      perf_lines = tmp_perf_file.readlines()
+      perf_lines = [line.decode('utf8') for line in tmp_perf_file.readlines()]
 
     if gen_trace_file:
       if keep_input:
-        print "Saving generated input trace: ", gen_trace_path
+        print("Saving generated input trace: {}".format(gen_trace_path))
       else:
         gen_trace_file.close()
         os.remove(gen_trace_path)
diff --git a/ui/src/common/query_iterator.ts b/ui/src/common/query_iterator.ts
new file mode 100644
index 0000000..1e41c41
--- /dev/null
+++ b/ui/src/common/query_iterator.ts
@@ -0,0 +1,186 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {RawQueryResult} from './protos';
+
+// Union of all the query result formats that we can turn into forward
+// iterators.
+// TODO(hjd): Replace someOtherEncoding placeholder with the real new
+// format.
+type QueryResult = RawQueryResult|{someOtherEncoding: string};
+
+// One row extracted from an SQL result:
+interface Row {
+  [key: string]: string|number|null;
+}
+
+// API:
+// const result = await engine.query("select 42 as n;");
+// const it = getRowIterator({"answer": NUM}, result);
+// for (; it.valid(); it.next()) {
+//   console.log(it.row.answer);
+// }
+export interface RowIterator<T extends Row> {
+  valid(): boolean;
+  next(): void;
+  row: T;
+}
+
+export const NUM = 0;
+export const STR = 'str';
+export const NUM_NULL: number|null = 1;
+export const STR_NULL: string|null = 'str_null';
+export type ColumnType =
+    (typeof NUM)|(typeof STR)|(typeof NUM_NULL)|(typeof STR_NULL);
+
+// Exported for testing
+export function findColumnIndex(
+    result: RawQueryResult, name: string, columnType: number|null|string):
+    number {
+  let matchingDescriptorIndex = -1;
+  const disallowNulls = columnType === STR || columnType === NUM;
+  const expectsStrings = columnType === STR || columnType === STR_NULL;
+  const expectsNumbers = columnType === NUM || columnType === NUM_NULL;
+
+  for (let i = 0; i < result.columnDescriptors.length; ++i) {
+    const descriptor = result.columnDescriptors[i];
+    const column = result.columns[i];
+    if (descriptor.name !== name) {
+      continue;
+    }
+
+    const hasDoubles = column.doubleValues && column.doubleValues.length;
+    const hasLongs = column.longValues && column.longValues.length;
+    const hasStrings = column.stringValues && column.stringValues.length;
+
+    if (matchingDescriptorIndex !== -1) {
+      throw new Error(`Multiple columns with the name ${name}`);
+    }
+
+    if (expectsStrings && (hasDoubles || hasLongs)) {
+      throw new Error(`Expected strings for column ${name} but found numbers`);
+    }
+
+    if (expectsNumbers && hasStrings) {
+      throw new Error(`Expected numbers for column ${name} but found strings`);
+    }
+
+    if (disallowNulls) {
+      for (let j = 0; j < result.numRecords; ++j) {
+        if (column.isNulls![j] === true) {
+          throw new Error(`Column ${name} contains nulls`);
+        }
+      }
+    }
+    matchingDescriptorIndex = i;
+  }
+
+  if (matchingDescriptorIndex === -1) {
+    throw new Error(`No column with name ${name} found in result.`);
+  }
+
+  return matchingDescriptorIndex;
+}
+
+class ColumnarRowIterator {
+  row: Row;
+  private i_: number;
+  private rowCount_: number;
+  private columnCount_: number;
+  private columnNames_: string[];
+  private columns_: Array<number[]|string[]>;
+  private nullColumns_: boolean[][];
+
+  constructor(querySpec: Row, queryResult: RawQueryResult) {
+    const row: Row = querySpec;
+    this.row = row;
+    this.i_ = 0;
+    this.rowCount_ = +queryResult.numRecords;
+    this.columnCount_ = 0;
+    this.columnNames_ = [];
+    this.columns_ = [];
+    this.nullColumns_ = [];
+
+    for (const [columnName, columnType] of Object.entries(querySpec)) {
+      const index = findColumnIndex(queryResult, columnName, columnType);
+      const column = queryResult.columns[index];
+      this.columnCount_++;
+      this.columnNames_.push(columnName);
+      let values: string[]|Array<number|Long> = [];
+      if (column.longValues && column.longValues.length > 0) {
+        values = column.longValues;
+      }
+      if (column.doubleValues && column.doubleValues.length > 0) {
+        values = column.doubleValues;
+      }
+      if (column.stringValues && column.stringValues.length > 0) {
+        values = column.stringValues;
+      }
+      this.columns_.push(values as string[]);
+      this.nullColumns_.push(column.isNulls!);
+    }
+    if (this.rowCount_ > 0) {
+      for (let j = 0; j < this.columnCount_; ++j) {
+        const name = this.columnNames_[j];
+        const isNull = this.nullColumns_[j][this.i_];
+        this.row[name] = isNull ? null : this.columns_[j][this.i_];
+      }
+    }
+  }
+
+  valid(): boolean {
+    return this.i_ < this.rowCount_;
+  }
+
+  next(): void {
+    this.i_++;
+    for (let j = 0; j < this.columnCount_; ++j) {
+      const name = this.columnNames_[j];
+      const isNull = this.nullColumns_[j][this.i_];
+      this.row[name] = isNull ? null : this.columns_[j][this.i_];
+    }
+  }
+}
+
+// Deliberately not exported, use iter() below to make code easy to switch
+// to other queryResult formats.
+function iterFromColumns<T extends Row>(
+    querySpec: T, queryResult: RawQueryResult): RowIterator<T> {
+  const iter = new ColumnarRowIterator(querySpec, queryResult);
+  return iter as unknown as RowIterator<T>;
+}
+
+
+function isColumnarQueryResult(result: QueryResult): result is RawQueryResult {
+  return (result as RawQueryResult).columnDescriptors !== undefined;
+}
+
+export function iter<T extends Row>(
+    spec: T, result: QueryResult): RowIterator<T> {
+  if (isColumnarQueryResult(result)) {
+    return iterFromColumns(spec, result);
+  } else {
+    throw new Error('Unsupported format');
+  }
+}
+
+export function slowlyCountRows(result: QueryResult): number {
+  if (isColumnarQueryResult(result)) {
+    // This isn't actually slow for columnar data but it might be for other
+    // formats.
+    return +result.numRecords;
+  } else {
+    throw new Error('Unsupported format');
+  }
+}
diff --git a/ui/src/common/query_iterator_unittest.ts b/ui/src/common/query_iterator_unittest.ts
new file mode 100644
index 0000000..d6eca47
--- /dev/null
+++ b/ui/src/common/query_iterator_unittest.ts
@@ -0,0 +1,173 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {RawQueryResult} from './protos';
+import {
+  findColumnIndex,
+  iter,
+  NUM,
+  NUM_NULL,
+  slowlyCountRows,
+  STR,
+  STR_NULL
+} from './query_iterator';
+
+const COLUMN_TYPE_STR = RawQueryResult.ColumnDesc.Type.STRING;
+const COLUMN_TYPE_DOUBLE = RawQueryResult.ColumnDesc.Type.DOUBLE;
+const COLUMN_TYPE_LONG = RawQueryResult.ColumnDesc.Type.LONG;
+
+test('Columnar iteration slowlyCountRows', () => {
+  const r = new RawQueryResult({
+    columnDescriptors: [{
+      name: 'string_column',
+      type: COLUMN_TYPE_STR,
+    }],
+    numRecords: 1,
+    columns: [{
+      stringValues: ['foo'],
+      isNulls: [false],
+    }],
+  });
+
+  expect(slowlyCountRows(r)).toBe(1);
+});
+
+test('Columnar iteration findColumnIndex', () => {
+  const r = new RawQueryResult({
+    columnDescriptors: [
+      {
+        name: 'strings',
+        type: COLUMN_TYPE_STR,
+      },
+      {
+        name: 'doubles',
+        type: COLUMN_TYPE_DOUBLE,
+      },
+      {
+        name: 'longs',
+        type: COLUMN_TYPE_LONG,
+      },
+      {
+        name: 'nullable_strings',
+        type: COLUMN_TYPE_STR,
+      },
+      {
+        name: 'nullable_doubles',
+        type: COLUMN_TYPE_DOUBLE,
+      },
+      {
+        name: 'nullable_longs',
+        type: COLUMN_TYPE_LONG,
+      },
+      {
+        name: 'twin',
+        type: COLUMN_TYPE_LONG,
+      },
+      {
+        name: 'twin',
+        type: COLUMN_TYPE_STR,
+      }
+    ],
+    numRecords: 1,
+    columns: [
+      {
+        stringValues: ['foo'],
+        isNulls: [false],
+      },
+      {
+        doubleValues: [1],
+        isNulls: [false],
+      },
+      {
+        longValues: [1],
+        isNulls: [false],
+      },
+      {
+        stringValues: [''],
+        isNulls: [true],
+      },
+      {
+        doubleValues: [0],
+        isNulls: [true],
+      },
+      {
+        longValues: [0],
+        isNulls: [true],
+      },
+      {
+        doubleValues: [0],
+        isNulls: [false],
+      },
+      {
+        stringValues: [''],
+        isNulls: [false],
+      }
+    ],
+  });
+
+  expect(findColumnIndex(r, 'strings', STR)).toBe(0);
+  expect(findColumnIndex(r, 'doubles', NUM)).toBe(1);
+  expect(findColumnIndex(r, 'longs', NUM)).toBe(2);
+
+  expect(findColumnIndex(r, 'nullable_strings', STR_NULL)).toBe(3);
+  expect(findColumnIndex(r, 'nullable_doubles', NUM_NULL)).toBe(4);
+  expect(findColumnIndex(r, 'nullable_longs', NUM_NULL)).toBe(5);
+
+  expect(() => findColumnIndex(r, 'no such col', NUM)).toThrow(Error);
+
+  // It's allowable to expect nulls but for the whole column to be non-null...
+  expect(findColumnIndex(r, 'strings', STR_NULL)).toBe(0);
+  expect(findColumnIndex(r, 'doubles', NUM_NULL)).toBe(1);
+  expect(findColumnIndex(r, 'longs', NUM_NULL)).toBe(2);
+
+  // ...but if we expect no-nulls there shouldn't be even one:
+  expect(() => findColumnIndex(r, 'nullable_strings', STR)).toThrow(Error);
+  expect(() => findColumnIndex(r, 'nullable_doubles', NUM)).toThrow(Error);
+  expect(() => findColumnIndex(r, 'nullable_longs', NUM)).toThrow(Error);
+
+  // If multiple columns have the desired name we error even if we could
+  // distinguish based on the type:
+  expect(() => findColumnIndex(r, 'twin', NUM)).toThrow(Error);
+
+  expect(() => findColumnIndex(r, 'strings', NUM)).toThrow(Error);
+  expect(() => findColumnIndex(r, 'longs', STR)).toThrow(Error);
+  expect(() => findColumnIndex(r, 'doubles', STR)).toThrow(Error);
+});
+
+test('Columnar iteration over two rows', () => {
+  const r = new RawQueryResult({
+    columnDescriptors: [{
+      name: 'name',
+      type: COLUMN_TYPE_STR,
+    }],
+    numRecords: 2,
+    columns: [{
+      stringValues: ['Alice', 'Bob'],
+      isNulls: [false, false],
+    }],
+  });
+
+  const it = iter({'name': STR}, r);
+
+  expect(it.valid()).toBe(true);
+  const name: string = it.row.name;
+  expect(name).toBe('Alice');
+  it.next();
+
+  expect(it.valid()).toBe(true);
+  expect(it.row.name).toBe('Bob');
+  it.next();
+
+  expect(it.valid()).toBe(false);
+});
diff --git a/ui/src/tracks/async_slices/controller.ts b/ui/src/tracks/async_slices/controller.ts
index 57694bc..534c19e 100644
--- a/ui/src/tracks/async_slices/controller.ts
+++ b/ui/src/tracks/async_slices/controller.ts
@@ -13,7 +13,6 @@
 // limitations under the License.
 
 import {fromNs, toNs} from '../../common/time';
-import {LIMIT} from '../../common/track_data';
 import {
   TrackController,
   trackControllerRegistry,
@@ -21,81 +20,54 @@
 
 import {ASYNC_SLICE_TRACK_KIND, Config, Data} from './common';
 
+
+// Allow overriding via devtools for testing (note: this needs to be done on
+// the controller thread).
+(self as {} as {quantPx: number}).quantPx = 1;
+
 class AsyncSliceTrackController extends TrackController<Config, Data> {
   static readonly kind = ASYNC_SLICE_TRACK_KIND;
-  private setup = false;
+  private maxDurNs = 0;
 
   async onBoundsChange(start: number, end: number, resolution: number):
       Promise<Data> {
     const startNs = toNs(start);
     const endNs = toNs(end);
-    // Ns in 1px width. We want all slices smaller than 1px to be grouped.
-    const minNs = toNs(resolution);
 
-    if (!this.setup) {
-      await this.query(
-          `create virtual table ${this.tableName('window')} using window;`);
+    const pxSize = (self as {} as {quantPx: number}).quantPx;
 
-      await this.query(
-          `create view ${this.tableName('small')} as ` +
-          `select ts,dur,layout_depth,name,id from experimental_slice_layout ` +
-          `where filter_track_ids = "${this.config.trackIds.join(',')}" ` +
-          `and dur < ${minNs} ` +
-          `order by ts;`);
+    // ns per quantization bucket (i.e. ns per pixel). The "/ 2 * 2" rounding
+    // forces the value to be even, so we can snap to the bucket's midpoint.
+    const bucketNs = Math.round(resolution * 1e9 * pxSize / 2) * 2;
 
-      await this.query(`create virtual table ${this.tableName('span')} using
-      span_join(${this.tableName('small')} PARTITIONED layout_depth,
-      ${this.tableName('window')});`);
-
-      this.setup = true;
+    if (this.maxDurNs === 0) {
+      const maxDurResult = await this.query(`
+        select max(dur)
+        from experimental_slice_layout
+        where filter_track_ids = '${this.config.trackIds.join(',')}'
+      `);
+      if (maxDurResult.numRecords === 1) {
+        this.maxDurNs = +maxDurResult.columns![0].longValues![0];
+      }
     }
 
-    const windowDurNs = Math.max(1, endNs - startNs);
-
-    this.query(`update ${this.tableName('window')} set
-    window_start=${startNs},
-    window_dur=${windowDurNs},
-    quantum=${minNs}`);
-
-    await this.query(`drop view if exists ${this.tableName('small')}`);
-    await this.query(`drop view if exists ${this.tableName('big')}`);
-    await this.query(`drop view if exists ${this.tableName('summary')}`);
-
-    await this.query(
-        `create view ${this.tableName('small')} as ` +
-        `select ts,dur,layout_depth,name,id from experimental_slice_layout ` +
-        `where filter_track_ids = "${this.config.trackIds.join(',')}" ` +
-        `and dur < ${minNs} ` +
-        `order by ts `);
-
-    await this.query(
-        `create view ${this.tableName('big')} as ` +
-        `select ts,dur,layout_depth,name,id from experimental_slice_layout ` +
-        `where filter_track_ids = "${this.config.trackIds.join(',')}" ` +
-        `and ts >= ${startNs} - dur ` +
-        `and ts <= ${endNs} ` +
-        `and dur >= ${minNs} ` +
-        `order by ts `);
-
-    // So that busy slices never overlap, we use the start of the bucket
-    // as the ts, even though min(ts) would technically be more accurate.
-    await this.query(`create view ${this.tableName('summary')} as select
-      (quantum_ts * ${minNs} + ${startNs}) as ts,
-      ${minNs} as dur,
-      layout_depth,
-      'Busy' as name,
-      -1 as id
-      from ${this.tableName('span')}
-      group by layout_depth, quantum_ts
-      order by ts;`);
-
-    const query = `select * from ${this.tableName('summary')} UNION ` +
-        `select * from ${this.tableName('big')} order by ts limit ${LIMIT}`;
-
-    const rawResult = await this.query(query);
+    const rawResult = await this.query(`
+      SELECT
+        (ts + ${bucketNs / 2}) / ${bucketNs} * ${bucketNs} as tsq,
+        ts,
+        max(dur) as dur,
+        layout_depth,
+        name,
+        id
+      from experimental_slice_layout
+      where
+        filter_track_ids = '${this.config.trackIds.join(',')}' and
+        ts >= ${startNs - this.maxDurNs} and
+        ts <= ${endNs}
+      group by tsq
+    `);
 
     const numRows = +rawResult.numRecords;
-
     const slices: Data = {
       start,
       end,
@@ -119,14 +91,25 @@
       return idx;
     }
 
+    const cols = rawResult.columns;
     for (let row = 0; row < numRows; row++) {
-      const cols = rawResult.columns;
-      const startSec = fromNs(+cols[0].longValues![row]);
-      slices.starts[row] = startSec;
-      slices.ends[row] = startSec + fromNs(+cols[1].longValues![row]);
-      slices.depths[row] = +cols[2].longValues![row];
-      slices.titles[row] = internString(cols[3].stringValues![row]);
-      slices.sliceIds[row] = +cols[4].longValues![row];
+      const startNsQ = +cols[0].longValues![row];
+      const startNs = +cols[1].longValues![row];
+      const durNs = +cols[2].longValues![row];
+      const endNs = startNs + durNs;
+
+      let endNsQ = Math.floor((endNs + bucketNs / 2 - 1) / bucketNs) * bucketNs;
+      endNsQ = Math.max(endNsQ, startNsQ + bucketNs);
+
+      if (startNsQ === endNsQ) {
+        throw new Error('Should never happen');
+      }
+
+      slices.starts[row] = fromNs(startNsQ);
+      slices.ends[row] = fromNs(endNsQ);
+      slices.depths[row] = +cols[3].longValues![row];
+      slices.titles[row] = internString(cols[4].stringValues![row]);
+      slices.sliceIds[row] = +cols[5].longValues![row];
     }
     return slices;
   }
diff --git a/ui/src/tracks/counter/controller.ts b/ui/src/tracks/counter/controller.ts
index 1452d00..0b534a9 100644
--- a/ui/src/tracks/counter/controller.ts
+++ b/ui/src/tracks/counter/controller.ts
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+import {iter, NUM, slowlyCountRows} from '../../common/query_iterator';
 import {fromNs, toNs} from '../../common/time';
-
 import {
   TrackController,
   trackControllerRegistry
@@ -91,8 +91,7 @@
     const rawResult = await this.query(`
       select
         (ts + ${bucketNs / 2}) / ${bucketNs} * ${bucketNs} as tsq,
-        ts,
-        max(dur) as dur,
+        max(value),
         id,
         value
       from ${this.tableName('counter_view')}
@@ -100,7 +99,8 @@
       group by tsq
     `);
 
-    const numRows = +rawResult.numRecords;
+    const numRows = slowlyCountRows(rawResult);
+
     const data: Data = {
       start,
       end,
@@ -113,26 +113,11 @@
       values: new Float64Array(numRows),
     };
 
-    const cols = rawResult.columns;
-    for (let row = 0; row < numRows; row++) {
-      const startNsQ = +cols[0].longValues![row];
-      const startNs = +cols[1].longValues![row];
-      const durNs = +cols[2].longValues![row];
-      const endNs = startNs + durNs;
-
-      let endNsQ = Math.floor((endNs + bucketNs / 2 - 1) / bucketNs) * bucketNs;
-      endNsQ = Math.max(endNsQ, startNsQ + bucketNs);
-
-      if (startNsQ === endNsQ) {
-        throw new Error('Should never happen');
-      }
-
-      const id = +cols[3].longValues![row];
-      const value = +cols[4].doubleValues![row];
-
-      data.timestamps[row] = fromNs(startNsQ);
-      data.ids[row] = id;
-      data.values[row] = value;
+    const it = iter({'tsq': NUM, 'id': NUM, 'value': NUM}, rawResult);
+    for (let i = 0; it.valid(); ++i, it.next()) {
+      data.timestamps[i] = fromNs(it.row.tsq);
+      data.ids[i] = it.row.id;
+      data.values[i] = it.row.value;
     }
 
     return data;