Merge "trace_processor: Flatten CPU freq storage and remove cycles"
diff --git a/src/trace_processor/counters_table.cc b/src/trace_processor/counters_table.cc
index 86cf70c..eec57f9 100644
--- a/src/trace_processor/counters_table.cc
+++ b/src/trace_processor/counters_table.cc
@@ -52,54 +52,59 @@
   return std::unique_ptr<Table::Cursor>(new Cursor(storage_));
 }
 
-int CountersTable::BestIndex(const QueryConstraints& qc, BestIndexInfo* info) {
-  info->estimated_cost = 10;
-
-  // If the query has a constraint on the |kRef| field, return a reduced cost
-  // because we can do that filter efficiently.
-  const auto& constraints = qc.constraints();
-  if (constraints.size() == 1 && constraints.front().iColumn == Column::kRef) {
-    info->estimated_cost = IsOpEq(constraints.front().op) ? 1 : 10;
-  }
-
+int CountersTable::BestIndex(const QueryConstraints&, BestIndexInfo* info) {
+  // TODO(taylori): Work out cost dependent on constraints.
+  info->estimated_cost =
+      static_cast<uint32_t>(storage_->counters().counter_count());
   return SQLITE_OK;
 }
 
-CountersTable::Cursor::Cursor(const TraceStorage* storage)
-    : storage_(storage) {}
+CountersTable::Cursor::Cursor(const TraceStorage* storage) : storage_(storage) {
+  num_rows_ = storage->counters().counter_count();
+}
 
 int CountersTable::Cursor::Column(sqlite3_context* context, int N) {
   switch (N) {
     case Column::kTimestamp: {
-      const auto& freq = storage_->GetFreqForCpu(current_cpu_);
-      sqlite3_result_int64(context,
-                           static_cast<int64_t>(freq[index_in_cpu_].first));
+      sqlite3_result_int64(
+          context,
+          static_cast<int64_t>(storage_->counters().timestamps()[row_]));
       break;
     }
     case Column::kValue: {
-      const auto& freq = storage_->GetFreqForCpu(current_cpu_);
-      sqlite3_result_int64(context, freq[index_in_cpu_].second);
+      sqlite3_result_int64(
+          context, static_cast<int64_t>(storage_->counters().values()[row_]));
       break;
     }
     case Column::kName: {
-      sqlite3_result_text(context, "cpufreq", -1, nullptr);
+      sqlite3_result_text(
+          context,
+          storage_->GetString(storage_->counters().name_ids()[row_]).c_str(),
+          -1, nullptr);
       break;
     }
     case Column::kRef: {
-      sqlite3_result_int64(context, current_cpu_);
+      sqlite3_result_int64(
+          context, static_cast<int64_t>(storage_->counters().refs()[row_]));
       break;
     }
     case Column::kRefType: {
-      sqlite3_result_text(context, "cpu", -1, nullptr);
+      switch (storage_->counters().types()[row_]) {
+        case RefType::kCPU_ID: {
+          sqlite3_result_text(context, "cpu", -1, nullptr);
+          break;
+        }
+        case RefType::kUPID: {
+          sqlite3_result_text(context, "upid", -1, nullptr);
+          break;
+        }
+      }
       break;
     }
     case Column::kDuration: {
-      const auto& freq = storage_->GetFreqForCpu(current_cpu_);
-      uint64_t duration = 0;
-      if (index_in_cpu_ + 1 < freq.size()) {
-        duration = freq[index_in_cpu_ + 1].first - freq[index_in_cpu_].first;
-      }
-      sqlite3_result_int64(context, static_cast<int64_t>(duration));
+      sqlite3_result_int64(
+          context,
+          static_cast<int64_t>(storage_->counters().durations()[row_]));
       break;
     }
     default:
@@ -109,47 +114,17 @@
   return SQLITE_OK;
 }
 
-int CountersTable::Cursor::Filter(const QueryConstraints& qc,
-                                  sqlite3_value** argv) {
-  for (size_t j = 0; j < qc.constraints().size(); j++) {
-    const auto& cs = qc.constraints()[j];
-    if (cs.iColumn == Column::kRef) {
-      auto constraint_cpu = static_cast<uint32_t>(sqlite3_value_int(argv[j]));
-      if (IsOpEq(cs.op)) {
-        filter_by_cpu_ = true;
-        filter_cpu_ = constraint_cpu;
-      }
-    }
-  }
-
+int CountersTable::Cursor::Filter(const QueryConstraints&, sqlite3_value**) {
   return SQLITE_OK;
 }
 
 int CountersTable::Cursor::Next() {
-  if (filter_by_cpu_) {
-    current_cpu_ = filter_cpu_;
-    ++index_in_cpu_;
-  } else {
-    if (index_in_cpu_ < storage_->GetFreqForCpu(current_cpu_).size() - 1) {
-      index_in_cpu_++;
-    } else if (current_cpu_ < storage_->GetMaxCpu()) {
-      ++current_cpu_;
-      index_in_cpu_ = 0;
-    }
-    // If the cpu is has no freq events, move to the next one.
-    while (current_cpu_ != storage_->GetMaxCpu() &&
-           storage_->GetFreqForCpu(current_cpu_).size() == 0) {
-      ++current_cpu_;
-    }
-  }
+  row_++;
   return SQLITE_OK;
 }
 
 int CountersTable::Cursor::Eof() {
-  if (filter_by_cpu_) {
-    return index_in_cpu_ == storage_->GetFreqForCpu(current_cpu_).size();
-  }
-  return current_cpu_ == storage_->GetMaxCpu();
+  return row_ >= num_rows_;
 }
 
 }  // namespace trace_processor
diff --git a/src/trace_processor/counters_table.h b/src/trace_processor/counters_table.h
index a28f366..9e257e3 100644
--- a/src/trace_processor/counters_table.h
+++ b/src/trace_processor/counters_table.h
@@ -58,10 +58,8 @@
     int Column(sqlite3_context*, int N) override;
 
    private:
-    bool filter_by_cpu_ = false;
-    uint32_t current_cpu_ = 0;
-    size_t index_in_cpu_ = 0;
-    uint32_t filter_cpu_ = 0;
+    size_t num_rows_;
+    size_t row_ = 0;
 
     const TraceStorage* const storage_;
   };
diff --git a/src/trace_processor/counters_table_unittest.cc b/src/trace_processor/counters_table_unittest.cc
index 4ec5792..0ff7cde 100644
--- a/src/trace_processor/counters_table_unittest.cc
+++ b/src/trace_processor/counters_table_unittest.cc
@@ -15,6 +15,7 @@
  */
 
 #include "src/trace_processor/counters_table.h"
+#include "src/trace_processor/sched_tracker.h"
 #include "src/trace_processor/scoped_db.h"
 #include "src/trace_processor/trace_processor_context.h"
 
@@ -33,6 +34,7 @@
     db_.reset(db);
 
     context_.storage.reset(new TraceStorage());
+    context_.sched_tracker.reset(new SchedTracker(&context_));
 
     CountersTable::RegisterTable(db_.get(), context_.storage.get());
   }
@@ -60,20 +62,23 @@
 TEST_F(CountersTableUnittest, SelectWhereCpu) {
   uint64_t timestamp = 1000;
   uint32_t freq = 3000;
-  context_.storage->PushCpuFreq(timestamp, 1 /* cpu */, freq);
-  context_.storage->PushCpuFreq(timestamp + 1, 1 /* cpu */, freq + 1000);
-  context_.storage->PushCpuFreq(timestamp + 2, 2 /* cpu */, freq + 2000);
+  context_.storage->mutable_counters()->AddCounter(
+      timestamp, 0, 1, freq, 1 /* cpu */, RefType::kCPU_ID);
+  context_.storage->mutable_counters()->AddCounter(
+      timestamp + 1, 1, 1, freq + 1000, 1 /* cpu */, RefType::kCPU_ID);
+  context_.storage->mutable_counters()->AddCounter(
+      timestamp + 2, 1, 1, freq + 2000, 2 /* cpu */, RefType::kCPU_ID);
 
   PrepareValidStatement("SELECT ts, dur, value FROM counters where ref = 1");
 
   ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_ROW);
   ASSERT_EQ(sqlite3_column_int(*stmt_, 0), timestamp);
-  ASSERT_EQ(sqlite3_column_int(*stmt_, 1), 1);
+  ASSERT_EQ(sqlite3_column_int(*stmt_, 1), 0);
   ASSERT_EQ(sqlite3_column_int(*stmt_, 2), freq);
 
   ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_ROW);
   ASSERT_EQ(sqlite3_column_int(*stmt_, 0), timestamp + 1);
-  ASSERT_EQ(sqlite3_column_int(*stmt_, 1), 0);
+  ASSERT_EQ(sqlite3_column_int(*stmt_, 1), 1);
   ASSERT_EQ(sqlite3_column_int(*stmt_, 2), freq + 1000);
 
   ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_DONE);
@@ -82,9 +87,12 @@
 TEST_F(CountersTableUnittest, GroupByFreq) {
   uint64_t timestamp = 1000;
   uint32_t freq = 3000;
-  context_.storage->PushCpuFreq(timestamp, 1 /* cpu */, freq);
-  context_.storage->PushCpuFreq(timestamp + 1, 1 /* cpu */, freq + 1000);
-  context_.storage->PushCpuFreq(timestamp + 3, 1 /* cpu */, freq);
+  context_.storage->mutable_counters()->AddCounter(
+      timestamp, 1, 1, freq, 1 /* cpu */, RefType::kCPU_ID);
+  context_.storage->mutable_counters()->AddCounter(
+      timestamp + 1, 2, 1, freq + 1000, 1 /* cpu */, RefType::kCPU_ID);
+  context_.storage->mutable_counters()->AddCounter(
+      timestamp + 3, 0, 1, freq, 1 /* cpu */, RefType::kCPU_ID);
 
   PrepareValidStatement(
       "SELECT value, sum(dur) as dur_sum FROM counters where value > 0 group "
diff --git a/src/trace_processor/proto_trace_parser.cc b/src/trace_processor/proto_trace_parser.cc
index 8cf1101..78ee7ad 100644
--- a/src/trace_processor/proto_trace_parser.cc
+++ b/src/trace_processor/proto_trace_parser.cc
@@ -84,7 +84,8 @@
 using protozero::proto_utils::kFieldTypeLengthDelimited;
 
 ProtoTraceParser::ProtoTraceParser(TraceProcessorContext* context)
-    : context_(context) {}
+    : context_(context),
+      cpu_freq_name_id_(context->storage->InternString("cpufreq")) {}
 
 ProtoTraceParser::~ProtoTraceParser() = default;
 
@@ -204,20 +205,20 @@
 void ProtoTraceParser::ParseCpuFreq(uint64_t timestamp, TraceBlobView view) {
   ProtoDecoder decoder(view.data(), view.length());
 
-  uint32_t cpu = 0;
+  uint32_t cpu_affected = 0;
   uint32_t new_freq = 0;
   for (auto fld = decoder.ReadField(); fld.id != 0; fld = decoder.ReadField()) {
     switch (fld.id) {
       case protos::CpuFrequencyFtraceEvent::kCpuIdFieldNumber:
-        cpu = fld.as_uint32();
+        cpu_affected = fld.as_uint32();
         break;
       case protos::CpuFrequencyFtraceEvent::kStateFieldNumber:
         new_freq = fld.as_uint32();
         break;
     }
   }
-
-  context_->storage->PushCpuFreq(timestamp, cpu, new_freq);
+  context_->sched_tracker->PushCounter(timestamp, new_freq, cpu_freq_name_id_,
+                                       cpu_affected, RefType::kCPU_ID);
 
   PERFETTO_DCHECK(decoder.IsEndOfBuffer());
 }
diff --git a/src/trace_processor/proto_trace_parser.h b/src/trace_processor/proto_trace_parser.h
index be3bc40..bccf534 100644
--- a/src/trace_processor/proto_trace_parser.h
+++ b/src/trace_processor/proto_trace_parser.h
@@ -67,6 +67,7 @@
 
  private:
   TraceProcessorContext* context_;
+  const StringId cpu_freq_name_id_;
 };
 
 }  // namespace trace_processor
diff --git a/src/trace_processor/proto_trace_parser_unittest.cc b/src/trace_processor/proto_trace_parser_unittest.cc
index 68efcdd..a2c1576 100644
--- a/src/trace_processor/proto_trace_parser_unittest.cc
+++ b/src/trace_processor/proto_trace_parser_unittest.cc
@@ -49,6 +49,13 @@
                     uint32_t prev_state,
                     base::StringView prev_comm,
                     uint32_t next_pid));
+
+  MOCK_METHOD5(PushCounter,
+               void(uint64_t timestamp,
+                    double value,
+                    StringId name_id,
+                    uint64_t ref,
+                    RefType ref_type));
 };
 
 class MockProcessTracker : public ProcessTracker {
@@ -66,8 +73,7 @@
  public:
   MockTraceStorage() : TraceStorage() {}
 
-  MOCK_METHOD3(PushCpuFreq,
-               void(uint64_t timestamp, uint32_t cpu, uint32_t new_freq));
+  MOCK_METHOD1(InternString, StringId(base::StringView));
 };
 
 class ProtoTraceParserTest : public ::testing::Test {
@@ -236,7 +242,7 @@
   cpu_freq->set_cpu_id(10);
   cpu_freq->set_state(2000);
 
-  EXPECT_CALL(*storage_, PushCpuFreq(1000, 10, 2000));
+  EXPECT_CALL(*sched_, PushCounter(1000, 2000, 0, 10, RefType::kCPU_ID));
   Tokenize(trace_1);
 }
 
diff --git a/src/trace_processor/sched_slice_table.cc b/src/trace_processor/sched_slice_table.cc
index 352d244..dfdde24 100644
--- a/src/trace_processor/sched_slice_table.cc
+++ b/src/trace_processor/sched_slice_table.cc
@@ -103,7 +103,6 @@
          "dur UNSIGNED BIG INT, "
          "quantized_group UNSIGNED BIG INT, "
          "utid UNSIGNED INT, "
-         "cycles UNSIGNED BIG INT, "
          "quantum HIDDEN BIG INT, "
          "ts_lower_bound HIDDEN BIG INT, "
          "ts_clip HIDDEN BOOLEAN, "
@@ -275,11 +274,6 @@
       sqlite3_result_int64(context, slices.utids()[row]);
       break;
     }
-    case Column::kCycles: {
-      sqlite3_result_int64(context,
-                           static_cast<sqlite3_int64>(slices.cycles()[row]));
-      break;
-    }
   }
   return SQLITE_OK;
 }
@@ -463,8 +457,6 @@
       return Compare(f_cpu, s_cpu, ob.desc);
     case SchedSliceTable::Column::kUtid:
       return Compare(f_sl.utids()[f_idx], s_sl.utids()[s_idx], ob.desc);
-    case SchedSliceTable::Column::kCycles:
-      return Compare(f_sl.cycles()[f_idx], s_sl.cycles()[s_idx], ob.desc);
     case SchedSliceTable::Column::kQuantizedGroup: {
       // We don't support sorting in descending order on quantized group when
       // we have a non-zero quantum.
diff --git a/src/trace_processor/sched_slice_table.h b/src/trace_processor/sched_slice_table.h
index 1cfca45..4e86336 100644
--- a/src/trace_processor/sched_slice_table.h
+++ b/src/trace_processor/sched_slice_table.h
@@ -39,12 +39,11 @@
     kDuration = 2,
     kQuantizedGroup = 3,
     kUtid = 4,
-    kCycles = 5,
 
     // Hidden columns.
-    kQuantum = 6,
-    kTimestampLowerBound = 7,
-    kClipTimestamp = 8,
+    kQuantum = 5,
+    kTimestampLowerBound = 6,
+    kClipTimestamp = 7,
   };
 
   SchedSliceTable(sqlite3*, const TraceStorage* storage);
diff --git a/src/trace_processor/sched_slice_table_unittest.cc b/src/trace_processor/sched_slice_table_unittest.cc
index fbc84ff..31c215f 100644
--- a/src/trace_processor/sched_slice_table_unittest.cc
+++ b/src/trace_processor/sched_slice_table_unittest.cc
@@ -366,42 +366,6 @@
               ElementsAre(71));
 }
 
-TEST_F(SchedSliceTableTest, CyclesOrdering) {
-  uint32_t cpu = 3;
-  uint64_t timestamp = 100;
-  uint32_t pid_1 = 2;
-  uint32_t prev_state = 32;
-  static const char kCommProc1[] = "process1";
-  static const char kCommProc2[] = "process2";
-  uint32_t pid_2 = 4;
-  context_.sched_tracker->PushSchedSwitch(cpu, timestamp, pid_1, prev_state,
-                                          kCommProc1, pid_2);
-  context_.storage->PushCpuFreq(timestamp + 1, cpu, 1e9);
-  context_.sched_tracker->PushSchedSwitch(cpu, timestamp + 2, pid_2, prev_state,
-                                          kCommProc2, pid_1);
-  context_.sched_tracker->PushSchedSwitch(cpu, timestamp + 4, pid_1, prev_state,
-                                          kCommProc1, pid_2);
-  context_.storage->PushCpuFreq(timestamp + 5, cpu, 2e9);
-  context_.sched_tracker->PushSchedSwitch(cpu, timestamp + 7, pid_2, prev_state,
-                                          kCommProc2, pid_1);
-
-  PrepareValidStatement("SELECT cycles, ts FROM sched ORDER BY cycles desc");
-
-  ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_ROW);
-  ASSERT_EQ(sqlite3_column_int64(*stmt_, 0), 5000 /* cycles */);
-  ASSERT_EQ(sqlite3_column_int64(*stmt_, 1), timestamp + 4);
-
-  ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_ROW);
-  ASSERT_EQ(sqlite3_column_int64(*stmt_, 0), 2000 /* cycles */);
-  ASSERT_EQ(sqlite3_column_int64(*stmt_, 1), timestamp + 2);
-
-  ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_ROW);
-  ASSERT_EQ(sqlite3_column_int64(*stmt_, 0), 1000 /* cycles */);
-  ASSERT_EQ(sqlite3_column_int64(*stmt_, 1), timestamp);
-
-  ASSERT_EQ(sqlite3_step(*stmt_), SQLITE_DONE);
-}
-
 }  // namespace
 }  // namespace trace_processor
 }  // namespace perfetto
diff --git a/src/trace_processor/sched_tracker.cc b/src/trace_processor/sched_tracker.cc
index e1e1854..5e9021b 100644
--- a/src/trace_processor/sched_tracker.cc
+++ b/src/trace_processor/sched_tracker.cc
@@ -54,9 +54,7 @@
                             : context_->storage->InternString(prev_comm);
     UniqueTid utid = context_->process_tracker->UpdateThread(
         prev->timestamp, prev->next_pid /* == prev_pid */, prev_thread_name_id);
-    uint64_t cycles = CalculateCycles(cpu, prev->timestamp, timestamp);
-    context_->storage->AddSliceToCpu(cpu, prev->timestamp, duration, utid,
-                                     cycles);
+    context_->storage->AddSliceToCpu(cpu, prev->timestamp, duration, utid);
   }
 
   // If this event's previous pid does not match the previous event's next
@@ -72,42 +70,33 @@
   prev->next_pid = next_pid;
 };
 
-uint64_t SchedTracker::CalculateCycles(uint32_t cpu,
-                                       uint64_t start_ns,
-                                       uint64_t end_ns) {
-  const auto& frequencies = context_->storage->GetFreqForCpu(cpu);
-  auto lower_index = lower_index_per_cpu_[cpu];
-  if (frequencies.empty())
-    return 0;
-
-  long double cycles = 0;
-
-  // Move the lower index up to the first cpu_freq event before start_ns.
-  while (lower_index + 1 < frequencies.size()) {
-    if (frequencies[lower_index + 1].first >= start_ns)
-      break;
-    ++lower_index;
-  };
-
-  // Since events are processed in timestamp order, we don't have any cpu_freq
-  // events with a timestamp larger than end_ns. Therefore we care about all
-  // freq events from lower_index (first event before start_ns) to the last
-  // cpu_freq event.
-  for (size_t i = lower_index; i < frequencies.size(); ++i) {
-    // Using max handles the special case for the first cpu_freq event.
-    uint64_t cycle_start = std::max(frequencies[i].first, start_ns);
-    // If there are no more freq_events we compute cycles until |end_ns|.
-    uint64_t cycle_end = end_ns;
-    if (i + 1 < frequencies.size())
-      cycle_end = frequencies[i + 1].first;
-
-    uint32_t freq_khz = frequencies[i].second;
-    cycles += ((cycle_end - cycle_start) / 1E6L) * freq_khz;
+void SchedTracker::PushCounter(uint64_t timestamp,
+                               double value,
+                               StringId name_id,
+                               uint64_t ref,
+                               RefType ref_type) {
+  if (timestamp < prev_timestamp_) {
+    PERFETTO_ELOG("counter event out of order by %.4f ms, skipping",
+                  (prev_timestamp_ - timestamp) / 1e6);
+    return;
+  }
+  prev_timestamp_ = timestamp;
+  Counter& prev = last_counter_per_cpu_[static_cast<size_t>(ref)];
+  if (prev.timestamp != 0) {
+    uint64_t duration = 0;
+    // TODO(taylori): Add handling of events other than cpu freq.
+    if (ref_type == RefType::kCPU_ID) {
+      duration = timestamp - prev.timestamp;
+    }
+    context_->storage->mutable_counters()->AddCounter(
+        prev.timestamp, duration, name_id, prev.value,
+        static_cast<int64_t>(ref), RefType::kCPU_ID);
   }
 
-  lower_index_per_cpu_[cpu] = frequencies.size() - 1;
-  return static_cast<uint64_t>(round(cycles));
-}
+  prev.timestamp = timestamp;
+  prev.value = value;
+  prev.name_id = name_id;
+};
 
 }  // namespace trace_processor
 }  // namespace perfetto
diff --git a/src/trace_processor/sched_tracker.h b/src/trace_processor/sched_tracker.h
index 53855a3..71037b6 100644
--- a/src/trace_processor/sched_tracker.h
+++ b/src/trace_processor/sched_tracker.h
@@ -46,6 +46,15 @@
     bool valid() const { return timestamp != 0; }
   };
 
+  // A Counter is a trace event that has a value attached to a timestamp.
+  // These include CPU frequency ftrace events and systrace trace_marker
+  // counter events.
+  struct Counter {
+    uint64_t timestamp = 0;
+    double value = 0;
+    StringId name_id = 0;
+  };
+
   // This method is called when a sched switch event is seen in the trace.
   virtual void PushSchedSwitch(uint32_t cpu,
                                uint64_t timestamp,
@@ -54,16 +63,25 @@
                                base::StringView prev_comm,
                                uint32_t next_pid);
 
- private:
-  // Based on the cpu frequencies stored in trace_storage, the number of cycles
-  // between start_ns and end_ns on |cpu| is calculated.
-  uint64_t CalculateCycles(uint32_t cpu, uint64_t start_ns, uint64_t end_ns);
+  // This method is called when a cpu freq event is seen in the trace.
+  // In the future it will be called for all counters.
+  // TODO(taylori): Move to a more appropriate class or rename class.
+  virtual void PushCounter(uint64_t timestamp,
+                           double value,
+                           StringId name_id,
+                           uint64_t ref,
+                           RefType ref_type);
 
+ private:
   // Store the previous sched event to calculate the duration before storing it.
   std::array<SchedSwitchEvent, base::kMaxCpus> last_sched_per_cpu_;
 
-  std::array<size_t, base::kMaxCpus> lower_index_per_cpu_{};
+  // Store the previous counter event to calculate the duration before storing
+  // in trace storage.
+  std::array<Counter, base::kMaxCpus> last_counter_per_cpu_;
 
+  // Timestamp of the previous event. Used to discard events arriving out
+  // of order.
   uint64_t prev_timestamp_ = 0;
 
   StringId const idle_string_id_;
diff --git a/src/trace_processor/sched_tracker_unittest.cc b/src/trace_processor/sched_tracker_unittest.cc
index b50f135..298de15 100644
--- a/src/trace_processor/sched_tracker_unittest.cc
+++ b/src/trace_processor/sched_tracker_unittest.cc
@@ -97,32 +97,33 @@
   ASSERT_EQ(context.storage->SlicesForCpu(cpu).durations().at(2), 31u - 11u);
   ASSERT_EQ(context.storage->SlicesForCpu(cpu).utids().at(0),
             context.storage->SlicesForCpu(cpu).utids().at(2));
-  ASSERT_EQ(context.storage->SlicesForCpu(cpu).cycles().at(0), 0);
 }
 
-TEST_F(SchedTrackerTest, TestCyclesCalculation) {
+TEST_F(SchedTrackerTest, CounterDuration) {
   uint32_t cpu = 3;
-  uint64_t timestamp = 1e9;
-  context.storage->PushCpuFreq(timestamp, cpu, 1e6);
+  uint64_t timestamp = 100;
+  StringId name_id = 0;
+  context.sched_tracker->PushCounter(timestamp, 1000, name_id, cpu,
+                                     RefType::kCPU_ID);
+  context.sched_tracker->PushCounter(timestamp + 1, 4000, name_id, cpu,
+                                     RefType::kCPU_ID);
+  context.sched_tracker->PushCounter(timestamp + 3, 5000, name_id, cpu,
+                                     RefType::kCPU_ID);
+  context.sched_tracker->PushCounter(timestamp + 9, 1000, name_id, cpu,
+                                     RefType::kCPU_ID);
 
-  uint32_t prev_state = 32;
-  static const char kCommProc1[] = "process1";
-  static const char kCommProc2[] = "process2";
+  ASSERT_EQ(context.storage->counters().counter_count(), 3ul);
+  ASSERT_EQ(context.storage->counters().timestamps().at(0), timestamp);
+  ASSERT_EQ(context.storage->counters().durations().at(0), 1);
+  ASSERT_EQ(context.storage->counters().values().at(0), 1000);
 
-  context.sched_tracker->PushSchedSwitch(
-      cpu, static_cast<uint64_t>(timestamp + 1e7L), /*tid=*/2, prev_state,
-      kCommProc1,
-      /*tid=*/4);
+  ASSERT_EQ(context.storage->counters().timestamps().at(1), timestamp + 1);
+  ASSERT_EQ(context.storage->counters().durations().at(1), 2);
+  ASSERT_EQ(context.storage->counters().values().at(1), 4000);
 
-  context.storage->PushCpuFreq(static_cast<uint64_t>(timestamp + 1e8L), cpu,
-                               2e6);
-  context.storage->PushCpuFreq(static_cast<uint64_t>(timestamp + 2e8L), cpu,
-                               3e6);
-  context.sched_tracker->PushSchedSwitch(
-      cpu, static_cast<uint64_t>(timestamp + 3e8L), /*tid=*/4, prev_state,
-      kCommProc2,
-      /*tid=*/2);
-  ASSERT_EQ(context.storage->SlicesForCpu(cpu).cycles().at(0), 590000000);
+  ASSERT_EQ(context.storage->counters().timestamps().at(2), timestamp + 3);
+  ASSERT_EQ(context.storage->counters().durations().at(2), 6);
+  ASSERT_EQ(context.storage->counters().values().at(2), 5000);
 }
 
 }  // namespace
diff --git a/src/trace_processor/trace_sorter_unittest.cc b/src/trace_processor/trace_sorter_unittest.cc
index db1a8b6..8716fbb 100644
--- a/src/trace_processor/trace_sorter_unittest.cc
+++ b/src/trace_processor/trace_sorter_unittest.cc
@@ -52,10 +52,19 @@
   }
 };
 
+class MockTraceStorage : public TraceStorage {
+ public:
+  MockTraceStorage() : TraceStorage() {}
+
+  MOCK_METHOD1(InternString, StringId(base::StringView view));
+};
+
 class TraceSorterTest : public ::testing::TestWithParam<OptimizationMode> {
  public:
   TraceSorterTest()
       : test_buffer_(std::unique_ptr<uint8_t[]>(new uint8_t[8]), 0, 8) {
+    storage_ = new MockTraceStorage();
+    context_.storage.reset(storage_);
     context_.sorter.reset(
         new TraceSorter(&context_, GetParam(), 0 /*window_size*/));
     parser_ = new MockTraceParser(&context_);
@@ -65,6 +74,7 @@
  protected:
   TraceProcessorContext context_;
   MockTraceParser* parser_;
+  MockTraceStorage* storage_;
   TraceBlobView test_buffer_;
 };
 
diff --git a/src/trace_processor/trace_storage.cc b/src/trace_processor/trace_storage.cc
index ce5fae1..43c5944 100644
--- a/src/trace_processor/trace_storage.cc
+++ b/src/trace_processor/trace_storage.cc
@@ -29,9 +29,6 @@
   // Reserve string ID 0 for the empty string.
   InternString("");
 
-  // Initialize all CPUs @ freq 0Hz.
-  for (size_t cpu = 0; cpu < base::kMaxCpus; cpu++)
-    cpu_freq_[cpu].emplace_back(0, 0);
 }
 
 TraceStorage::~TraceStorage() {}
@@ -39,9 +36,8 @@
 void TraceStorage::AddSliceToCpu(uint32_t cpu,
                                  uint64_t start_ns,
                                  uint64_t duration_ns,
-                                 UniqueTid utid,
-                                 uint64_t cycles) {
-  cpu_events_[cpu].AddSlice(start_ns, duration_ns, utid, cycles);
+                                 UniqueTid utid) {
+  cpu_events_[cpu].AddSlice(start_ns, duration_ns, utid);
 };
 
 StringId TraceStorage::InternString(base::StringView str) {
diff --git a/src/trace_processor/trace_storage.h b/src/trace_processor/trace_storage.h
index 79b1492..5a7e1be 100644
--- a/src/trace_processor/trace_storage.h
+++ b/src/trace_processor/trace_storage.h
@@ -43,9 +43,7 @@
 // StringId is an offset into |string_pool_|.
 using StringId = size_t;
 
-// A map containing timestamps and the cpu frequency set at that time.
-using CpuFreq =
-    std::deque<std::pair<uint64_t /*timestamp*/, uint32_t /*freq*/>>;
+enum RefType { kUPID = 0, kCPU_ID = 1 };
 
 // Stores the data from a trace file in a columnar form. This makes it efficient
 // to read or search across a single field of the trace (e.g. all the thread
@@ -84,12 +82,10 @@
    public:
     inline void AddSlice(uint64_t start_ns,
                          uint64_t duration_ns,
-                         UniqueTid utid,
-                         uint64_t cycles) {
+                         UniqueTid utid) {
       start_ns_.emplace_back(start_ns);
       durations_.emplace_back(duration_ns);
       utids_.emplace_back(utid);
-      cycles_.emplace_back(cycles);
     }
 
     size_t slice_count() const { return start_ns_.size(); }
@@ -100,15 +96,12 @@
 
     const std::deque<UniqueTid>& utids() const { return utids_; }
 
-    const std::deque<uint64_t>& cycles() const { return cycles_; }
-
    private:
     // Each deque below has the same number of entries (the number of slices
     // in the trace for the CPU).
     std::deque<uint64_t> start_ns_;
     std::deque<uint64_t> durations_;
     std::deque<UniqueTid> utids_;
-    std::deque<uint64_t> cycles_;
   };
 
   class NestableSlices {
@@ -154,13 +147,50 @@
     std::deque<uint64_t> parent_stack_ids_;
   };
 
+  class Counters {
+   public:
+    inline void AddCounter(uint64_t timestamp,
+                           uint64_t duration,
+                           StringId name_id,
+                           double value,
+                           int64_t ref,
+                           RefType type) {
+      timestamps_.emplace_back(timestamp);
+      durations_.emplace_back(duration);
+      name_ids_.emplace_back(name_id);
+      values_.emplace_back(value);
+      refs_.emplace_back(ref);
+      types_.emplace_back(type);
+    }
+    size_t counter_count() const { return timestamps_.size(); }
+
+    const std::deque<uint64_t>& timestamps() const { return timestamps_; }
+
+    const std::deque<uint64_t>& durations() const { return durations_; }
+
+    const std::deque<StringId>& name_ids() const { return name_ids_; }
+
+    const std::deque<double>& values() const { return values_; }
+
+    const std::deque<int64_t>& refs() const { return refs_; }
+
+    const std::deque<RefType>& types() const { return types_; }
+
+   private:
+    std::deque<uint64_t> timestamps_;
+    std::deque<uint64_t> durations_;
+    std::deque<StringId> name_ids_;
+    std::deque<double> values_;
+    std::deque<int64_t> refs_;
+    std::deque<RefType> types_;
+  };
+
   void ResetStorage();
 
   void AddSliceToCpu(uint32_t cpu,
                      uint64_t start_ns,
                      uint64_t duration_ns,
-                     UniqueTid utid,
-                     uint64_t cycles);
+                     UniqueTid utid);
 
   UniqueTid AddEmptyThread(uint32_t tid) {
     unique_threads_.emplace_back(tid);
@@ -176,7 +206,8 @@
 
   // Return a unique identifier for the contents of each string.
   // The string is copied internally and can be destroyed after this call.
-  StringId InternString(base::StringView);
+  // Virtual for testing.
+  virtual StringId InternString(base::StringView);
 
   Process* GetMutableProcess(UniquePid upid) {
     PERFETTO_DCHECK(upid > 0 && upid < unique_processes_.size());
@@ -213,24 +244,8 @@
   const NestableSlices& nestable_slices() const { return nestable_slices_; }
   NestableSlices* mutable_nestable_slices() { return &nestable_slices_; }
 
-  // Virtual for testing.
-  virtual void PushCpuFreq(uint64_t timestamp,
-                           uint32_t cpu,
-                           uint32_t new_freq) {
-    auto& freqs = cpu_freq_[cpu];
-    if (!freqs.empty() && timestamp < freqs.back().first) {
-      PERFETTO_ELOG("cpufreq out of order by %.4f ms, skipping",
-                    (freqs.back().first - timestamp) / 1e6);
-      return;
-    }
-    freqs.emplace_back(timestamp, new_freq);
-  }
-
-  const CpuFreq& GetFreqForCpu(uint32_t cpu) const { return cpu_freq_[cpu]; }
-
-  uint32_t GetMaxCpu() const {
-    return static_cast<uint32_t>(cpu_freq_.size() - 1);
-  }
+  const Counters& counters() const { return counters_; }
+  Counters* mutable_counters() { return &counters_; }
 
   // |unique_processes_| always contains at least 1 element because the 0th ID
   // is reserved to indicate an invalid process.
@@ -254,10 +269,6 @@
   // One entry for each CPU in the trace.
   std::array<SlicesPerCpu, base::kMaxCpus> cpu_events_;
 
-  // One map containing frequencies for every CPU in the trace. The map contains
-  // timestamps and the cpu frequency value at that time.
-  std::array<CpuFreq, base::kMaxCpus> cpu_freq_;
-
   // One entry for each unique string in the trace.
   std::deque<std::string> string_pool_;
 
@@ -272,6 +283,10 @@
 
   // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
   NestableSlices nestable_slices_;
+
+  // Counter events from the trace. This includes CPU frequency events as
+  // well as systrace trace_marker counter events.
+  Counters counters_;
 };
 
 }  // namespace trace_processor