Move NetEq and ANA plotting to a separate file.

The ANA (audio network adaptor) and NetEq plotting code moves from
EventLogAnalyzer into free functions in
rtc_tools/rtc_event_log_visualizer/analyze_audio.{h,cc} that take the
parsed event log and the AnalyzerConfig as explicit arguments. The
shared helpers ProcessPoints, ProcessPairs, AccumulatePairs and
MovingAverage, together with kNumMicrosecsPerSec and the plot margin
constants, move to analyzer_common.h so that both analyzer.cc and
analyze_audio.cc can use them.
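
The new free functions are meant to be wired up from main.cc roughly as
in the sketch below (the PlotCollection pointer and the replacement
file name are illustrative, not taken from this CL):

  NetEqStatsGetterMap neteq_stats =
      SimulateNetEq(parsed_log, config, "replacement.wav", 48000);
  for (const auto& entry : neteq_stats) {
    // One jitter buffer plot per simulated audio SSRC.
    Plot* plot = collection->AppendNewPlot();  // Illustrative plot allocation.
    CreateAudioJitterBufferGraph(parsed_log, config, entry.first,
                                 entry.second.get(), plot);
  }
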
Bug: webrtc:11566
Change-Id: I6d6176ff72a158a1629e14b539de2e928e7d02a9
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/176510
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Björn Terelius <terelius@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31472}
diff --git a/rtc_tools/BUILD.gn b/rtc_tools/BUILD.gn
index f193c51..7d7ae99 100644
--- a/rtc_tools/BUILD.gn
+++ b/rtc_tools/BUILD.gn
@@ -325,6 +325,8 @@
       sources = [
         "rtc_event_log_visualizer/alerts.cc",
         "rtc_event_log_visualizer/alerts.h",
+        "rtc_event_log_visualizer/analyze_audio.cc",
+        "rtc_event_log_visualizer/analyze_audio.h",
         "rtc_event_log_visualizer/analyzer.cc",
         "rtc_event_log_visualizer/analyzer.h",
         "rtc_event_log_visualizer/analyzer_common.cc",
@@ -371,6 +373,7 @@
       absl_deps = [
         "//third_party/abseil-cpp/absl/algorithm:container",
         "//third_party/abseil-cpp/absl/strings",
+        "//third_party/abseil-cpp/absl/types:optional",
       ]
     }
   }
diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc
new file mode 100644
index 0000000..becc004
--- /dev/null
+++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc
@@ -0,0 +1,503 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/rtc_event_log_visualizer/analyze_audio.h"
+
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/ref_counted_object.h"
+
+namespace webrtc {
+
+void CreateAudioEncoderTargetBitrateGraph(const ParsedRtcEventLog& parsed_log,
+                                          const AnalyzerConfig& config,
+                                          Plot* plot) {
+  TimeSeries time_series("Audio encoder target bitrate", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaBitrateBps = [](const LoggedAudioNetworkAdaptationEvent& ana_event)
+      -> absl::optional<float> {
+    if (ana_event.config.bitrate_bps)
+      return absl::optional<float>(
+          static_cast<float>(*ana_event.config.bitrate_bps));
+    return absl::nullopt;
+  };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaBitrateBps,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder target bitrate");
+}
+
+void CreateAudioEncoderFrameLengthGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot) {
+  TimeSeries time_series("Audio encoder frame length", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaFrameLengthMs =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.frame_length_ms)
+          return absl::optional<float>(
+              static_cast<float>(*ana_event.config.frame_length_ms));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaFrameLengthMs,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder frame length");
+}
+
+void CreateAudioEncoderPacketLossGraph(const ParsedRtcEventLog& parsed_log,
+                                       const AnalyzerConfig& config,
+                                       Plot* plot) {
+  TimeSeries time_series("Audio encoder uplink packet loss fraction",
+                         LineStyle::kLine, PointStyle::kHighlight);
+  auto GetAnaPacketLoss =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.uplink_packet_loss_fraction)
+          return absl::optional<float>(static_cast<float>(
+              *ana_event.config.uplink_packet_loss_fraction));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaPacketLoss,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("Reported audio encoder lost packets");
+}
+
+void CreateAudioEncoderEnableFecGraph(const ParsedRtcEventLog& parsed_log,
+                                      const AnalyzerConfig& config,
+                                      Plot* plot) {
+  TimeSeries time_series("Audio encoder FEC", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaFecEnabled =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_fec)
+          return absl::optional<float>(
+              static_cast<float>(*ana_event.config.enable_fec));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaFecEnabled,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder FEC");
+}
+
+void CreateAudioEncoderEnableDtxGraph(const ParsedRtcEventLog& parsed_log,
+                                      const AnalyzerConfig& config,
+                                      Plot* plot) {
+  TimeSeries time_series("Audio encoder DTX", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaDtxEnabled =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.enable_dtx)
+          return absl::optional<float>(
+              static_cast<float>(*ana_event.config.enable_dtx));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaDtxEnabled,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder DTX");
+}
+
+void CreateAudioEncoderNumChannelsGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot) {
+  TimeSeries time_series("Audio encoder number of channels", LineStyle::kLine,
+                         PointStyle::kHighlight);
+  auto GetAnaNumChannels =
+      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
+        if (ana_event.config.num_channels)
+          return absl::optional<float>(
+              static_cast<float>(*ana_event.config.num_channels));
+        return absl::optional<float>();
+      };
+  auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) {
+    return config.GetCallTimeSec(packet.log_time_us());
+  };
+  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
+      ToCallTime, GetAnaNumChannels,
+      parsed_log.audio_network_adaptation_events(), &time_series);
+  plot->AppendTimeSeries(std::move(time_series));
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
+                          kBottomMargin, kTopMargin);
+  plot->SetTitle("Reported audio encoder number of channels");
+}
+
+class NetEqStreamInput : public test::NetEqInput {
+ public:
+  // Does not take any ownership, and all pointers must refer to valid objects
+  // that outlive the one constructed.
+  NetEqStreamInput(const std::vector<LoggedRtpPacketIncoming>* packet_stream,
+                   const std::vector<LoggedAudioPlayoutEvent>* output_events,
+                   absl::optional<int64_t> end_time_ms)
+      : packet_stream_(*packet_stream),
+        packet_stream_it_(packet_stream_.begin()),
+        output_events_it_(output_events->begin()),
+        output_events_end_(output_events->end()),
+        end_time_ms_(end_time_ms) {
+    RTC_DCHECK(packet_stream);
+    RTC_DCHECK(output_events);
+  }
+
+  absl::optional<int64_t> NextPacketTime() const override {
+    if (packet_stream_it_ == packet_stream_.end()) {
+      return absl::nullopt;
+    }
+    if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) {
+      return absl::nullopt;
+    }
+    return packet_stream_it_->rtp.log_time_ms();
+  }
+
+  absl::optional<int64_t> NextOutputEventTime() const override {
+    if (output_events_it_ == output_events_end_) {
+      return absl::nullopt;
+    }
+    if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) {
+      return absl::nullopt;
+    }
+    return output_events_it_->log_time_ms();
+  }
+
+  std::unique_ptr<PacketData> PopPacket() override {
+    if (packet_stream_it_ == packet_stream_.end()) {
+      return std::unique_ptr<PacketData>();
+    }
+    std::unique_ptr<PacketData> packet_data(new PacketData());
+    packet_data->header = packet_stream_it_->rtp.header;
+    packet_data->time_ms = packet_stream_it_->rtp.log_time_ms();
+
+    // This is a header-only "dummy" packet. Set the payload to all zeros, with
+    // length according to the virtual length.
+    packet_data->payload.SetSize(packet_stream_it_->rtp.total_length -
+                                 packet_stream_it_->rtp.header_length);
+    std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
+
+    ++packet_stream_it_;
+    return packet_data;
+  }
+
+  void AdvanceOutputEvent() override {
+    if (output_events_it_ != output_events_end_) {
+      ++output_events_it_;
+    }
+  }
+
+  bool ended() const override { return !NextEventTime(); }
+
+  absl::optional<RTPHeader> NextHeader() const override {
+    if (packet_stream_it_ == packet_stream_.end()) {
+      return absl::nullopt;
+    }
+    return packet_stream_it_->rtp.header;
+  }
+
+ private:
+  const std::vector<LoggedRtpPacketIncoming>& packet_stream_;
+  std::vector<LoggedRtpPacketIncoming>::const_iterator packet_stream_it_;
+  std::vector<LoggedAudioPlayoutEvent>::const_iterator output_events_it_;
+  const std::vector<LoggedAudioPlayoutEvent>::const_iterator output_events_end_;
+  const absl::optional<int64_t> end_time_ms_;
+};
+
+namespace {
+
+// Factory to create a "replacement decoder" that produces the decoded audio
+// by reading from a file rather than from the encoded payloads.
+class ReplacementAudioDecoderFactory : public AudioDecoderFactory {
+ public:
+  ReplacementAudioDecoderFactory(const absl::string_view replacement_file_name,
+                                 int file_sample_rate_hz)
+      : replacement_file_name_(replacement_file_name),
+        file_sample_rate_hz_(file_sample_rate_hz) {}
+
+  std::vector<AudioCodecSpec> GetSupportedDecoders() override {
+    RTC_NOTREACHED();
+    return {};
+  }
+
+  bool IsSupportedDecoder(const SdpAudioFormat& format) override {
+    return true;
+  }
+
+  std::unique_ptr<AudioDecoder> MakeAudioDecoder(
+      const SdpAudioFormat& format,
+      absl::optional<AudioCodecPairId> codec_pair_id) override {
+    auto replacement_file = std::make_unique<test::ResampleInputAudioFile>(
+        replacement_file_name_, file_sample_rate_hz_);
+    replacement_file->set_output_rate_hz(48000);
+    return std::make_unique<test::FakeDecodeFromFile>(
+        std::move(replacement_file), 48000, false);
+  }
+
+ private:
+  const std::string replacement_file_name_;
+  const int file_sample_rate_hz_;
+};
+
+// Creates a NetEq test object and all necessary input and output helpers. Runs
+// the test and returns the NetEqStatsGetter object that wraps the
+// NetEqDelayAnalyzer used to instrument the test.
+std::unique_ptr<test::NetEqStatsGetter> CreateNetEqTestAndRun(
+    const std::vector<LoggedRtpPacketIncoming>* packet_stream,
+    const std::vector<LoggedAudioPlayoutEvent>* output_events,
+    absl::optional<int64_t> end_time_ms,
+    const std::string& replacement_file_name,
+    int file_sample_rate_hz) {
+  std::unique_ptr<test::NetEqInput> input(
+      new NetEqStreamInput(packet_stream, output_events, end_time_ms));
+
+  constexpr int kReplacementPt = 127;
+  std::set<uint8_t> cn_types;
+  std::set<uint8_t> forbidden_types;
+  input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt,
+                                              cn_types, forbidden_types));
+
+  NetEq::Config config;
+  config.max_packets_in_buffer = 200;
+  config.enable_fast_accelerate = true;
+
+  std::unique_ptr<test::VoidAudioSink> output(new test::VoidAudioSink());
+
+  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory =
+      new rtc::RefCountedObject<ReplacementAudioDecoderFactory>(
+          replacement_file_name, file_sample_rate_hz);
+
+  test::NetEqTest::DecoderMap codecs = {
+      {kReplacementPt, SdpAudioFormat("l16", 48000, 1)}};
+
+  std::unique_ptr<test::NetEqDelayAnalyzer> delay_cb(
+      new test::NetEqDelayAnalyzer);
+  std::unique_ptr<test::NetEqStatsGetter> neteq_stats_getter(
+      new test::NetEqStatsGetter(std::move(delay_cb)));
+  test::DefaultNetEqTestErrorCallback error_cb;
+  test::NetEqTest::Callbacks callbacks;
+  callbacks.error_callback = &error_cb;
+  callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer();
+  callbacks.get_audio_callback = neteq_stats_getter.get();
+
+  test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr,
+                       /*factory=*/nullptr, std::move(input), std::move(output),
+                       callbacks);
+  test.Run();
+  return neteq_stats_getter;
+}
+}  // namespace
+
+NetEqStatsGetterMap SimulateNetEq(const ParsedRtcEventLog& parsed_log,
+                                  const AnalyzerConfig& config,
+                                  const std::string& replacement_file_name,
+                                  int file_sample_rate_hz) {
+  NetEqStatsGetterMap neteq_stats;
+
+  for (const auto& stream : parsed_log.incoming_rtp_packets_by_ssrc()) {
+    const uint32_t ssrc = stream.ssrc;
+    if (!IsAudioSsrc(parsed_log, kIncomingPacket, ssrc))
+      continue;
+    const std::vector<LoggedRtpPacketIncoming>* audio_packets =
+        &stream.incoming_packets;
+    if (audio_packets == nullptr) {
+      // No incoming audio stream found.
+      continue;
+    }
+
+    RTC_DCHECK(neteq_stats.find(ssrc) == neteq_stats.end());
+
+    std::map<uint32_t, std::vector<LoggedAudioPlayoutEvent>>::const_iterator
+        output_events_it = parsed_log.audio_playout_events().find(ssrc);
+    if (output_events_it == parsed_log.audio_playout_events().end()) {
+      // Could not find output events with SSRC matching the input audio stream.
+      // Using the first available stream of output events.
+      output_events_it = parsed_log.audio_playout_events().cbegin();
+    }
+
+    int64_t end_time_ms = parsed_log.first_log_segment().stop_time_ms();
+
+    neteq_stats[ssrc] = CreateNetEqTestAndRun(
+        audio_packets, &output_events_it->second, end_time_ms,
+        replacement_file_name, file_sample_rate_hz);
+  }
+
+  return neteq_stats;
+}
+
+// Given a NetEqStatsGetter and the SSRC that the NetEqStatsGetter was created
+// for, this function generates a plot for the jitter buffer delay profile.
+void CreateAudioJitterBufferGraph(const ParsedRtcEventLog& parsed_log,
+                                  const AnalyzerConfig& config,
+                                  uint32_t ssrc,
+                                  const test::NetEqStatsGetter* stats_getter,
+                                  Plot* plot) {
+  test::NetEqDelayAnalyzer::Delays arrival_delay_ms;
+  test::NetEqDelayAnalyzer::Delays corrected_arrival_delay_ms;
+  test::NetEqDelayAnalyzer::Delays playout_delay_ms;
+  test::NetEqDelayAnalyzer::Delays target_delay_ms;
+
+  stats_getter->delay_analyzer()->CreateGraphs(
+      &arrival_delay_ms, &corrected_arrival_delay_ms, &playout_delay_ms,
+      &target_delay_ms);
+
+  TimeSeries time_series_packet_arrival("packet arrival delay",
+                                        LineStyle::kLine);
+  TimeSeries time_series_relative_packet_arrival(
+      "Relative packet arrival delay", LineStyle::kLine);
+  TimeSeries time_series_play_time("Playout delay", LineStyle::kLine);
+  TimeSeries time_series_target_time("Target delay", LineStyle::kLine,
+                                     PointStyle::kHighlight);
+
+  for (const auto& data : arrival_delay_ms) {
+    const float x = config.GetCallTimeSec(data.first * 1000);  // ms to us.
+    const float y = data.second;
+    time_series_packet_arrival.points.emplace_back(TimeSeriesPoint(x, y));
+  }
+  for (const auto& data : corrected_arrival_delay_ms) {
+    const float x = config.GetCallTimeSec(data.first * 1000);  // ms to us.
+    const float y = data.second;
+    time_series_relative_packet_arrival.points.emplace_back(
+        TimeSeriesPoint(x, y));
+  }
+  for (const auto& data : playout_delay_ms) {
+    const float x = config.GetCallTimeSec(data.first * 1000);  // ms to us.
+    const float y = data.second;
+    time_series_play_time.points.emplace_back(TimeSeriesPoint(x, y));
+  }
+  for (const auto& data : target_delay_ms) {
+    const float x = config.GetCallTimeSec(data.first * 1000);  // ms to us.
+    const float y = data.second;
+    time_series_target_time.points.emplace_back(TimeSeriesPoint(x, y));
+  }
+
+  plot->AppendTimeSeries(std::move(time_series_packet_arrival));
+  plot->AppendTimeSeries(std::move(time_series_relative_packet_arrival));
+  plot->AppendTimeSeries(std::move(time_series_play_time));
+  plot->AppendTimeSeries(std::move(time_series_target_time));
+
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, "Relative delay (ms)", kBottomMargin,
+                          kTopMargin);
+  plot->SetTitle("NetEq timing for " +
+                 GetStreamName(parsed_log, kIncomingPacket, ssrc));
+}
+
+template <typename NetEqStatsType>
+void CreateNetEqStatsGraphInternal(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats,
+    rtc::FunctionView<const std::vector<std::pair<int64_t, NetEqStatsType>>*(
+        const test::NetEqStatsGetter*)> data_extractor,
+    rtc::FunctionView<float(const NetEqStatsType&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot) {
+  std::map<uint32_t, TimeSeries> time_series;
+
+  for (const auto& st : neteq_stats) {
+    const uint32_t ssrc = st.first;
+    const std::vector<std::pair<int64_t, NetEqStatsType>>* data_vector =
+        data_extractor(st.second.get());
+    for (const auto& data : *data_vector) {
+      const float time = config.GetCallTimeSec(data.first * 1000);  // ms to us.
+      const float value = stats_extractor(data.second);
+      time_series[ssrc].points.emplace_back(TimeSeriesPoint(time, value));
+    }
+  }
+
+  for (auto& series : time_series) {
+    series.second.label =
+        GetStreamName(parsed_log, kIncomingPacket, series.first);
+    series.second.line_style = LineStyle::kLine;
+    plot->AppendTimeSeries(std::move(series.second));
+  }
+
+  plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)",
+                 kLeftMargin, kRightMargin);
+  plot->SetSuggestedYAxis(0, 1, plot_name, kBottomMargin, kTopMargin);
+  plot->SetTitle(plot_name);
+}
+
+void CreateNetEqNetworkStatsGraph(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats,
+    rtc::FunctionView<float(const NetEqNetworkStatistics&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot) {
+  CreateNetEqStatsGraphInternal<NetEqNetworkStatistics>(
+      parsed_log, config, neteq_stats,
+      [](const test::NetEqStatsGetter* stats_getter) {
+        return stats_getter->stats();
+      },
+      stats_extractor, plot_name, plot);
+}
+
+void CreateNetEqLifetimeStatsGraph(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats,
+    rtc::FunctionView<float(const NetEqLifetimeStatistics&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot) {
+  CreateNetEqStatsGraphInternal<NetEqLifetimeStatistics>(
+      parsed_log, config, neteq_stats,
+      [](const test::NetEqStatsGetter* stats_getter) {
+        return stats_getter->lifetime_stats();
+      },
+      stats_extractor, plot_name, plot);
+}
+
+}  // namespace webrtc
diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.h b/rtc_tools/rtc_event_log_visualizer/analyze_audio.h
new file mode 100644
index 0000000..726e844
--- /dev/null
+++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_
+#define RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+
+#include "api/function_view.h"
+#include "logging/rtc_event_log/rtc_event_log_parser.h"
+#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
+#include "rtc_tools/rtc_event_log_visualizer/analyzer_common.h"
+#include "rtc_tools/rtc_event_log_visualizer/plot_base.h"
+
+namespace webrtc {
+
+void CreateAudioEncoderTargetBitrateGraph(const ParsedRtcEventLog& parsed_log,
+                                          const AnalyzerConfig& config,
+                                          Plot* plot);
+void CreateAudioEncoderFrameLengthGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot);
+void CreateAudioEncoderPacketLossGraph(const ParsedRtcEventLog& parsed_log,
+                                       const AnalyzerConfig& config,
+                                       Plot* plot);
+void CreateAudioEncoderEnableFecGraph(const ParsedRtcEventLog& parsed_log,
+                                      const AnalyzerConfig& config,
+                                      Plot* plot);
+void CreateAudioEncoderEnableDtxGraph(const ParsedRtcEventLog& parsed_log,
+                                      const AnalyzerConfig& config,
+                                      Plot* plot);
+void CreateAudioEncoderNumChannelsGraph(const ParsedRtcEventLog& parsed_log,
+                                        const AnalyzerConfig& config,
+                                        Plot* plot);
+
+using NetEqStatsGetterMap =
+    std::map<uint32_t, std::unique_ptr<test::NetEqStatsGetter>>;
+NetEqStatsGetterMap SimulateNetEq(const ParsedRtcEventLog& parsed_log,
+                                  const AnalyzerConfig& config,
+                                  const std::string& replacement_file_name,
+                                  int file_sample_rate_hz);
+
+void CreateAudioJitterBufferGraph(const ParsedRtcEventLog& parsed_log,
+                                  const AnalyzerConfig& config,
+                                  uint32_t ssrc,
+                                  const test::NetEqStatsGetter* stats_getter,
+                                  Plot* plot);
+void CreateNetEqNetworkStatsGraph(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats_getters,
+    rtc::FunctionView<float(const NetEqNetworkStatistics&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot);
+void CreateNetEqLifetimeStatsGraph(
+    const ParsedRtcEventLog& parsed_log,
+    const AnalyzerConfig& config,
+    const NetEqStatsGetterMap& neteq_stats_getters,
+    rtc::FunctionView<float(const NetEqLifetimeStatistics&)> stats_extractor,
+    const std::string& plot_name,
+    Plot* plot);
+
+}  // namespace webrtc
+
+#endif  // RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_
diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.cc b/rtc_tools/rtc_event_log_visualizer/analyzer.cc
index 287fbe2..8ca108e 100644
--- a/rtc_tools/rtc_event_log_visualizer/analyzer.cc
+++ b/rtc_tools/rtc_event_log_visualizer/analyzer.cc
@@ -31,12 +31,6 @@
 #include "logging/rtc_event_log/rtc_event_processor.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
-#include "modules/audio_coding/neteq/tools/audio_sink.h"
-#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
-#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
-#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
-#include "modules/audio_coding/neteq/tools/neteq_test.h"
-#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
 #include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
 #include "modules/congestion_controller/goog_cc/bitrate_estimator.h"
 #include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
@@ -71,8 +65,6 @@
 
 namespace {
 
-const int kNumMicrosecsPerSec = 1000000;
-
 std::string SsrcToString(uint32_t ssrc) {
   rtc::StringBuilder ss;
   ss << "SSRC " << ssrc;
@@ -168,11 +160,6 @@
   return absl::nullopt;
 }
 
-constexpr float kLeftMargin = 0.01f;
-constexpr float kRightMargin = 0.02f;
-constexpr float kBottomMargin = 0.02f;
-constexpr float kTopMargin = 0.05f;
-
 absl::optional<double> NetworkDelayDiff_AbsSendTime(
     const LoggedRtpPacketIncoming& old_packet,
     const LoggedRtpPacketIncoming& new_packet) {
@@ -222,99 +209,6 @@
   return delay_change;
 }
 
-// For each element in data_view, use |f()| to extract a y-coordinate and
-// store the result in a TimeSeries.
-template <typename DataType, typename IterableType>
-void ProcessPoints(rtc::FunctionView<float(const DataType&)> fx,
-                   rtc::FunctionView<absl::optional<float>(const DataType&)> fy,
-                   const IterableType& data_view,
-                   TimeSeries* result) {
-  for (size_t i = 0; i < data_view.size(); i++) {
-    const DataType& elem = data_view[i];
-    float x = fx(elem);
-    absl::optional<float> y = fy(elem);
-    if (y)
-      result->points.emplace_back(x, *y);
-  }
-}
-
-// For each pair of adjacent elements in |data|, use |f()| to extract a
-// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate
-// will be the time of the second element in the pair.
-template <typename DataType, typename ResultType, typename IterableType>
-void ProcessPairs(
-    rtc::FunctionView<float(const DataType&)> fx,
-    rtc::FunctionView<absl::optional<ResultType>(const DataType&,
-                                                 const DataType&)> fy,
-    const IterableType& data,
-    TimeSeries* result) {
-  for (size_t i = 1; i < data.size(); i++) {
-    float x = fx(data[i]);
-    absl::optional<ResultType> y = fy(data[i - 1], data[i]);
-    if (y)
-      result->points.emplace_back(x, static_cast<float>(*y));
-  }
-}
-
-// For each pair of adjacent elements in |data|, use |f()| to extract a
-// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate
-// will be the time of the second element in the pair.
-template <typename DataType, typename ResultType, typename IterableType>
-void AccumulatePairs(
-    rtc::FunctionView<float(const DataType&)> fx,
-    rtc::FunctionView<absl::optional<ResultType>(const DataType&,
-                                                 const DataType&)> fy,
-    const IterableType& data,
-    TimeSeries* result) {
-  ResultType sum = 0;
-  for (size_t i = 1; i < data.size(); i++) {
-    float x = fx(data[i]);
-    absl::optional<ResultType> y = fy(data[i - 1], data[i]);
-    if (y) {
-      sum += *y;
-      result->points.emplace_back(x, static_cast<float>(sum));
-    }
-  }
-}
-
-// Calculates a moving average of |data| and stores the result in a TimeSeries.
-// A data point is generated every |step| microseconds from |begin_time|
-// to |end_time|. The value of each data point is the average of the data
-// during the preceding |window_duration_us| microseconds.
-template <typename DataType, typename ResultType, typename IterableType>
-void MovingAverage(
-    rtc::FunctionView<absl::optional<ResultType>(const DataType&)> fy,
-    const IterableType& data_view,
-    AnalyzerConfig config,
-    TimeSeries* result) {
-  size_t window_index_begin = 0;
-  size_t window_index_end = 0;
-  ResultType sum_in_window = 0;
-
-  for (int64_t t = config.begin_time_; t < config.end_time_ + config.step_;
-       t += config.step_) {
-    while (window_index_end < data_view.size() &&
-           data_view[window_index_end].log_time_us() < t) {
-      absl::optional<ResultType> value = fy(data_view[window_index_end]);
-      if (value)
-        sum_in_window += *value;
-      ++window_index_end;
-    }
-    while (window_index_begin < data_view.size() &&
-           data_view[window_index_begin].log_time_us() <
-               t - config.window_duration_) {
-      absl::optional<ResultType> value = fy(data_view[window_index_begin]);
-      if (value)
-        sum_in_window -= *value;
-      ++window_index_begin;
-    }
-    float window_duration_s =
-        static_cast<float>(config.window_duration_) / kNumMicrosecsPerSec;
-    float x = config.GetCallTimeSec(t);
-    float y = sum_in_window / window_duration_s;
-    result->points.emplace_back(x, y);
-  }
-}
 
 template <typename T>
 TimeSeries CreateRtcpTypeTimeSeries(const std::vector<T>& rtcp_list,
@@ -1725,462 +1619,6 @@
   plot->SetTitle(title);
 }
 
-void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) {
-  TimeSeries time_series("Audio encoder target bitrate", LineStyle::kLine,
-                         PointStyle::kHighlight);
-  auto GetAnaBitrateBps = [](const LoggedAudioNetworkAdaptationEvent& ana_event)
-      -> absl::optional<float> {
-    if (ana_event.config.bitrate_bps)
-      return absl::optional<float>(
-          static_cast<float>(*ana_event.config.bitrate_bps));
-    return absl::nullopt;
-  };
-  auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) {
-    return this->config_.GetCallTimeSec(packet.log_time_us());
-  };
-  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
-      ToCallTime, GetAnaBitrateBps,
-      parsed_log_.audio_network_adaptation_events(), &time_series);
-  plot->AppendTimeSeries(std::move(time_series));
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin);
-  plot->SetTitle("Reported audio encoder target bitrate");
-}
-
-void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) {
-  TimeSeries time_series("Audio encoder frame length", LineStyle::kLine,
-                         PointStyle::kHighlight);
-  auto GetAnaFrameLengthMs =
-      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
-        if (ana_event.config.frame_length_ms)
-          return absl::optional<float>(
-              static_cast<float>(*ana_event.config.frame_length_ms));
-        return absl::optional<float>();
-      };
-  auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) {
-    return this->config_.GetCallTimeSec(packet.log_time_us());
-  };
-  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
-      ToCallTime, GetAnaFrameLengthMs,
-      parsed_log_.audio_network_adaptation_events(), &time_series);
-  plot->AppendTimeSeries(std::move(time_series));
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin);
-  plot->SetTitle("Reported audio encoder frame length");
-}
-
-void EventLogAnalyzer::CreateAudioEncoderPacketLossGraph(Plot* plot) {
-  TimeSeries time_series("Audio encoder uplink packet loss fraction",
-                         LineStyle::kLine, PointStyle::kHighlight);
-  auto GetAnaPacketLoss =
-      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
-        if (ana_event.config.uplink_packet_loss_fraction)
-          return absl::optional<float>(static_cast<float>(
-              *ana_event.config.uplink_packet_loss_fraction));
-        return absl::optional<float>();
-      };
-  auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) {
-    return this->config_.GetCallTimeSec(packet.log_time_us());
-  };
-  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
-      ToCallTime, GetAnaPacketLoss,
-      parsed_log_.audio_network_adaptation_events(), &time_series);
-  plot->AppendTimeSeries(std::move(time_series));
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin,
-                          kTopMargin);
-  plot->SetTitle("Reported audio encoder lost packets");
-}
-
-void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) {
-  TimeSeries time_series("Audio encoder FEC", LineStyle::kLine,
-                         PointStyle::kHighlight);
-  auto GetAnaFecEnabled =
-      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
-        if (ana_event.config.enable_fec)
-          return absl::optional<float>(
-              static_cast<float>(*ana_event.config.enable_fec));
-        return absl::optional<float>();
-      };
-  auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) {
-    return this->config_.GetCallTimeSec(packet.log_time_us());
-  };
-  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
-      ToCallTime, GetAnaFecEnabled,
-      parsed_log_.audio_network_adaptation_events(), &time_series);
-  plot->AppendTimeSeries(std::move(time_series));
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin);
-  plot->SetTitle("Reported audio encoder FEC");
-}
-
-void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) {
-  TimeSeries time_series("Audio encoder DTX", LineStyle::kLine,
-                         PointStyle::kHighlight);
-  auto GetAnaDtxEnabled =
-      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
-        if (ana_event.config.enable_dtx)
-          return absl::optional<float>(
-              static_cast<float>(*ana_event.config.enable_dtx));
-        return absl::optional<float>();
-      };
-  auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) {
-    return this->config_.GetCallTimeSec(packet.log_time_us());
-  };
-  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
-      ToCallTime, GetAnaDtxEnabled,
-      parsed_log_.audio_network_adaptation_events(), &time_series);
-  plot->AppendTimeSeries(std::move(time_series));
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin);
-  plot->SetTitle("Reported audio encoder DTX");
-}
-
-void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) {
-  TimeSeries time_series("Audio encoder number of channels", LineStyle::kLine,
-                         PointStyle::kHighlight);
-  auto GetAnaNumChannels =
-      [](const LoggedAudioNetworkAdaptationEvent& ana_event) {
-        if (ana_event.config.num_channels)
-          return absl::optional<float>(
-              static_cast<float>(*ana_event.config.num_channels));
-        return absl::optional<float>();
-      };
-  auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) {
-    return this->config_.GetCallTimeSec(packet.log_time_us());
-  };
-  ProcessPoints<LoggedAudioNetworkAdaptationEvent>(
-      ToCallTime, GetAnaNumChannels,
-      parsed_log_.audio_network_adaptation_events(), &time_series);
-  plot->AppendTimeSeries(std::move(time_series));
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))",
-                          kBottomMargin, kTopMargin);
-  plot->SetTitle("Reported audio encoder number of channels");
-}
-
-class NetEqStreamInput : public test::NetEqInput {
- public:
-  // Does not take any ownership, and all pointers must refer to valid objects
-  // that outlive the one constructed.
-  NetEqStreamInput(const std::vector<LoggedRtpPacketIncoming>* packet_stream,
-                   const std::vector<LoggedAudioPlayoutEvent>* output_events,
-                   absl::optional<int64_t> end_time_ms)
-      : packet_stream_(*packet_stream),
-        packet_stream_it_(packet_stream_.begin()),
-        output_events_it_(output_events->begin()),
-        output_events_end_(output_events->end()),
-        end_time_ms_(end_time_ms) {
-    RTC_DCHECK(packet_stream);
-    RTC_DCHECK(output_events);
-  }
-
-  absl::optional<int64_t> NextPacketTime() const override {
-    if (packet_stream_it_ == packet_stream_.end()) {
-      return absl::nullopt;
-    }
-    if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) {
-      return absl::nullopt;
-    }
-    return packet_stream_it_->rtp.log_time_ms();
-  }
-
-  absl::optional<int64_t> NextOutputEventTime() const override {
-    if (output_events_it_ == output_events_end_) {
-      return absl::nullopt;
-    }
-    if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) {
-      return absl::nullopt;
-    }
-    return output_events_it_->log_time_ms();
-  }
-
-  std::unique_ptr<PacketData> PopPacket() override {
-    if (packet_stream_it_ == packet_stream_.end()) {
-      return std::unique_ptr<PacketData>();
-    }
-    std::unique_ptr<PacketData> packet_data(new PacketData());
-    packet_data->header = packet_stream_it_->rtp.header;
-    packet_data->time_ms = packet_stream_it_->rtp.log_time_ms();
-
-    // This is a header-only "dummy" packet. Set the payload to all zeros, with
-    // length according to the virtual length.
-    packet_data->payload.SetSize(packet_stream_it_->rtp.total_length -
-                                 packet_stream_it_->rtp.header_length);
-    std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
-
-    ++packet_stream_it_;
-    return packet_data;
-  }
-
-  void AdvanceOutputEvent() override {
-    if (output_events_it_ != output_events_end_) {
-      ++output_events_it_;
-    }
-  }
-
-  bool ended() const override { return !NextEventTime(); }
-
-  absl::optional<RTPHeader> NextHeader() const override {
-    if (packet_stream_it_ == packet_stream_.end()) {
-      return absl::nullopt;
-    }
-    return packet_stream_it_->rtp.header;
-  }
-
- private:
-  const std::vector<LoggedRtpPacketIncoming>& packet_stream_;
-  std::vector<LoggedRtpPacketIncoming>::const_iterator packet_stream_it_;
-  std::vector<LoggedAudioPlayoutEvent>::const_iterator output_events_it_;
-  const std::vector<LoggedAudioPlayoutEvent>::const_iterator output_events_end_;
-  const absl::optional<int64_t> end_time_ms_;
-};
-
-namespace {
-
-// Factory to create a "replacement decoder" that produces the decoded audio
-// by reading from a file rather than from the encoded payloads.
-class ReplacementAudioDecoderFactory : public AudioDecoderFactory {
- public:
-  ReplacementAudioDecoderFactory(const absl::string_view replacement_file_name,
-                                 int file_sample_rate_hz)
-      : replacement_file_name_(replacement_file_name),
-        file_sample_rate_hz_(file_sample_rate_hz) {}
-
-  std::vector<AudioCodecSpec> GetSupportedDecoders() override {
-    RTC_NOTREACHED();
-    return {};
-  }
-
-  bool IsSupportedDecoder(const SdpAudioFormat& format) override {
-    return true;
-  }
-
-  std::unique_ptr<AudioDecoder> MakeAudioDecoder(
-      const SdpAudioFormat& format,
-      absl::optional<AudioCodecPairId> codec_pair_id) override {
-    auto replacement_file = std::make_unique<test::ResampleInputAudioFile>(
-        replacement_file_name_, file_sample_rate_hz_);
-    replacement_file->set_output_rate_hz(48000);
-    return std::make_unique<test::FakeDecodeFromFile>(
-        std::move(replacement_file), 48000, false);
-  }
-
- private:
-  const std::string replacement_file_name_;
-  const int file_sample_rate_hz_;
-};
-
-// Creates a NetEq test object and all necessary input and output helpers. Runs
-// the test and returns the NetEqDelayAnalyzer object that was used to
-// instrument the test.
-std::unique_ptr<test::NetEqStatsGetter> CreateNetEqTestAndRun(
-    const std::vector<LoggedRtpPacketIncoming>* packet_stream,
-    const std::vector<LoggedAudioPlayoutEvent>* output_events,
-    absl::optional<int64_t> end_time_ms,
-    const std::string& replacement_file_name,
-    int file_sample_rate_hz) {
-  std::unique_ptr<test::NetEqInput> input(
-      new NetEqStreamInput(packet_stream, output_events, end_time_ms));
-
-  constexpr int kReplacementPt = 127;
-  std::set<uint8_t> cn_types;
-  std::set<uint8_t> forbidden_types;
-  input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt,
-                                              cn_types, forbidden_types));
-
-  NetEq::Config config;
-  config.max_packets_in_buffer = 200;
-  config.enable_fast_accelerate = true;
-
-  std::unique_ptr<test::VoidAudioSink> output(new test::VoidAudioSink());
-
-  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory =
-      new rtc::RefCountedObject<ReplacementAudioDecoderFactory>(
-          replacement_file_name, file_sample_rate_hz);
-
-  test::NetEqTest::DecoderMap codecs = {
-      {kReplacementPt, SdpAudioFormat("l16", 48000, 1)}};
-
-  std::unique_ptr<test::NetEqDelayAnalyzer> delay_cb(
-      new test::NetEqDelayAnalyzer);
-  std::unique_ptr<test::NetEqStatsGetter> neteq_stats_getter(
-      new test::NetEqStatsGetter(std::move(delay_cb)));
-  test::DefaultNetEqTestErrorCallback error_cb;
-  test::NetEqTest::Callbacks callbacks;
-  callbacks.error_callback = &error_cb;
-  callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer();
-  callbacks.get_audio_callback = neteq_stats_getter.get();
-
-  test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr,
-                       /*factory=*/nullptr, std::move(input), std::move(output),
-                       callbacks);
-  test.Run();
-  return neteq_stats_getter;
-}
-}  // namespace
-
-EventLogAnalyzer::NetEqStatsGetterMap EventLogAnalyzer::SimulateNetEq(
-    const std::string& replacement_file_name,
-    int file_sample_rate_hz) const {
-  NetEqStatsGetterMap neteq_stats;
-
-  for (const auto& stream : parsed_log_.incoming_rtp_packets_by_ssrc()) {
-    const uint32_t ssrc = stream.ssrc;
-    if (!IsAudioSsrc(parsed_log_, kIncomingPacket, ssrc))
-      continue;
-    const std::vector<LoggedRtpPacketIncoming>* audio_packets =
-        &stream.incoming_packets;
-    if (audio_packets == nullptr) {
-      // No incoming audio stream found.
-      continue;
-    }
-
-    RTC_DCHECK(neteq_stats.find(ssrc) == neteq_stats.end());
-
-    std::map<uint32_t, std::vector<LoggedAudioPlayoutEvent>>::const_iterator
-        output_events_it = parsed_log_.audio_playout_events().find(ssrc);
-    if (output_events_it == parsed_log_.audio_playout_events().end()) {
-      // Could not find output events with SSRC matching the input audio stream.
-      // Using the first available stream of output events.
-      output_events_it = parsed_log_.audio_playout_events().cbegin();
-    }
-
-    int64_t end_time_ms = parsed_log_.first_log_segment().stop_time_ms();
-
-    neteq_stats[ssrc] = CreateNetEqTestAndRun(
-        audio_packets, &output_events_it->second, end_time_ms,
-        replacement_file_name, file_sample_rate_hz);
-  }
-
-  return neteq_stats;
-}
-
-// Given a NetEqStatsGetter and the SSRC that the NetEqStatsGetter was created
-// for, this method generates a plot for the jitter buffer delay profile.
-void EventLogAnalyzer::CreateAudioJitterBufferGraph(
-    uint32_t ssrc,
-    const test::NetEqStatsGetter* stats_getter,
-    Plot* plot) const {
-  test::NetEqDelayAnalyzer::Delays arrival_delay_ms;
-  test::NetEqDelayAnalyzer::Delays corrected_arrival_delay_ms;
-  test::NetEqDelayAnalyzer::Delays playout_delay_ms;
-  test::NetEqDelayAnalyzer::Delays target_delay_ms;
-
-  stats_getter->delay_analyzer()->CreateGraphs(
-      &arrival_delay_ms, &corrected_arrival_delay_ms, &playout_delay_ms,
-      &target_delay_ms);
-
-  TimeSeries time_series_packet_arrival("packet arrival delay",
-                                        LineStyle::kLine);
-  TimeSeries time_series_relative_packet_arrival(
-      "Relative packet arrival delay", LineStyle::kLine);
-  TimeSeries time_series_play_time("Playout delay", LineStyle::kLine);
-  TimeSeries time_series_target_time("Target delay", LineStyle::kLine,
-                                     PointStyle::kHighlight);
-
-  for (const auto& data : arrival_delay_ms) {
-    const float x = config_.GetCallTimeSec(data.first * 1000);  // ms to us.
-    const float y = data.second;
-    time_series_packet_arrival.points.emplace_back(TimeSeriesPoint(x, y));
-  }
-  for (const auto& data : corrected_arrival_delay_ms) {
-    const float x = config_.GetCallTimeSec(data.first * 1000);  // ms to us.
-    const float y = data.second;
-    time_series_relative_packet_arrival.points.emplace_back(
-        TimeSeriesPoint(x, y));
-  }
-  for (const auto& data : playout_delay_ms) {
-    const float x = config_.GetCallTimeSec(data.first * 1000);  // ms to us.
-    const float y = data.second;
-    time_series_play_time.points.emplace_back(TimeSeriesPoint(x, y));
-  }
-  for (const auto& data : target_delay_ms) {
-    const float x = config_.GetCallTimeSec(data.first * 1000);  // ms to us.
-    const float y = data.second;
-    time_series_target_time.points.emplace_back(TimeSeriesPoint(x, y));
-  }
-
-  plot->AppendTimeSeries(std::move(time_series_packet_arrival));
-  plot->AppendTimeSeries(std::move(time_series_relative_packet_arrival));
-  plot->AppendTimeSeries(std::move(time_series_play_time));
-  plot->AppendTimeSeries(std::move(time_series_target_time));
-
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, "Relative delay (ms)", kBottomMargin,
-                          kTopMargin);
-  plot->SetTitle("NetEq timing for " +
-                 GetStreamName(parsed_log_, kIncomingPacket, ssrc));
-}
-
-template <typename NetEqStatsType>
-void EventLogAnalyzer::CreateNetEqStatsGraphInternal(
-    const NetEqStatsGetterMap& neteq_stats,
-    rtc::FunctionView<const std::vector<std::pair<int64_t, NetEqStatsType>>*(
-        const test::NetEqStatsGetter*)> data_extractor,
-    rtc::FunctionView<float(const NetEqStatsType&)> stats_extractor,
-    const std::string& plot_name,
-    Plot* plot) const {
-  std::map<uint32_t, TimeSeries> time_series;
-
-  for (const auto& st : neteq_stats) {
-    const uint32_t ssrc = st.first;
-    const std::vector<std::pair<int64_t, NetEqStatsType>>* data_vector =
-        data_extractor(st.second.get());
-    for (const auto& data : *data_vector) {
-      const float time =
-          config_.GetCallTimeSec(data.first * 1000);  // ms to us.
-      const float value = stats_extractor(data.second);
-      time_series[ssrc].points.emplace_back(TimeSeriesPoint(time, value));
-    }
-  }
-
-  for (auto& series : time_series) {
-    series.second.label =
-        GetStreamName(parsed_log_, kIncomingPacket, series.first);
-    series.second.line_style = LineStyle::kLine;
-    plot->AppendTimeSeries(std::move(series.second));
-  }
-
-  plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(),
-                 "Time (s)", kLeftMargin, kRightMargin);
-  plot->SetSuggestedYAxis(0, 1, plot_name, kBottomMargin, kTopMargin);
-  plot->SetTitle(plot_name);
-}
-
-void EventLogAnalyzer::CreateNetEqNetworkStatsGraph(
-    const NetEqStatsGetterMap& neteq_stats,
-    rtc::FunctionView<float(const NetEqNetworkStatistics&)> stats_extractor,
-    const std::string& plot_name,
-    Plot* plot) const {
-  CreateNetEqStatsGraphInternal<NetEqNetworkStatistics>(
-      neteq_stats,
-      [](const test::NetEqStatsGetter* stats_getter) {
-        return stats_getter->stats();
-      },
-      stats_extractor, plot_name, plot);
-}
-
-void EventLogAnalyzer::CreateNetEqLifetimeStatsGraph(
-    const NetEqStatsGetterMap& neteq_stats,
-    rtc::FunctionView<float(const NetEqLifetimeStatistics&)> stats_extractor,
-    const std::string& plot_name,
-    Plot* plot) const {
-  CreateNetEqStatsGraphInternal<NetEqLifetimeStatistics>(
-      neteq_stats,
-      [](const test::NetEqStatsGetter* stats_getter) {
-        return stats_getter->lifetime_stats();
-      },
-      stats_extractor, plot_name, plot);
-}
-
 void EventLogAnalyzer::CreateIceCandidatePairConfigGraph(Plot* plot) {
   std::map<uint32_t, TimeSeries> configs_by_cp_id;
   for (const auto& config : parsed_log_.ice_candidate_pair_configs()) {
diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.h b/rtc_tools/rtc_event_log_visualizer/analyzer.h
index ebdfdcc..4918cf4 100644
--- a/rtc_tools/rtc_event_log_visualizer/analyzer.h
+++ b/rtc_tools/rtc_event_log_visualizer/analyzer.h
@@ -79,32 +79,6 @@
       std::string yaxis_label,
       Plot* plot);
 
-  void CreateAudioEncoderTargetBitrateGraph(Plot* plot);
-  void CreateAudioEncoderFrameLengthGraph(Plot* plot);
-  void CreateAudioEncoderPacketLossGraph(Plot* plot);
-  void CreateAudioEncoderEnableFecGraph(Plot* plot);
-  void CreateAudioEncoderEnableDtxGraph(Plot* plot);
-  void CreateAudioEncoderNumChannelsGraph(Plot* plot);
-
-  using NetEqStatsGetterMap =
-      std::map<uint32_t, std::unique_ptr<test::NetEqStatsGetter>>;
-  NetEqStatsGetterMap SimulateNetEq(const std::string& replacement_file_name,
-                                    int file_sample_rate_hz) const;
-
-  void CreateAudioJitterBufferGraph(uint32_t ssrc,
-                                    const test::NetEqStatsGetter* stats_getter,
-                                    Plot* plot) const;
-  void CreateNetEqNetworkStatsGraph(
-      const NetEqStatsGetterMap& neteq_stats_getters,
-      rtc::FunctionView<float(const NetEqNetworkStatistics&)> stats_extractor,
-      const std::string& plot_name,
-      Plot* plot) const;
-  void CreateNetEqLifetimeStatsGraph(
-      const NetEqStatsGetterMap& neteq_stats_getters,
-      rtc::FunctionView<float(const NetEqLifetimeStatistics&)> stats_extractor,
-      const std::string& plot_name,
-      Plot* plot) const;
-
   void CreateIceCandidatePairConfigGraph(Plot* plot);
   void CreateIceConnectivityCheckGraph(Plot* plot);
 
@@ -115,15 +89,6 @@
   void PrintNotifications(FILE* file);
 
  private:
-  template <typename NetEqStatsType>
-  void CreateNetEqStatsGraphInternal(
-      const NetEqStatsGetterMap& neteq_stats,
-      rtc::FunctionView<const std::vector<std::pair<int64_t, NetEqStatsType>>*(
-          const test::NetEqStatsGetter*)> data_extractor,
-      rtc::FunctionView<float(const NetEqStatsType&)> stats_extractor,
-      const std::string& plot_name,
-      Plot* plot) const;
-
   template <typename IterableType>
   void CreateAccumulatedPacketsTimeSeries(Plot* plot,
                                           const IterableType& packets,
diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer_common.h b/rtc_tools/rtc_event_log_visualizer/analyzer_common.h
index 3ac651e..d5776ac 100644
--- a/rtc_tools/rtc_event_log_visualizer/analyzer_common.h
+++ b/rtc_tools/rtc_event_log_visualizer/analyzer_common.h
@@ -14,10 +14,19 @@
 #include <cstdint>
 #include <string>
 
+#include "absl/types/optional.h"
+#include "api/function_view.h"
 #include "logging/rtc_event_log/rtc_event_log_parser.h"
+#include "rtc_tools/rtc_event_log_visualizer/plot_base.h"
 
 namespace webrtc {
 
+constexpr int kNumMicrosecsPerSec = 1000000;
+constexpr float kLeftMargin = 0.01f;
+constexpr float kRightMargin = 0.02f;
+constexpr float kBottomMargin = 0.02f;
+constexpr float kTopMargin = 0.05f;
+
 class AnalyzerConfig {
  public:
   float GetCallTimeSec(int64_t timestamp_us) const {
@@ -74,6 +83,100 @@
                           uint32_t ssrc);
 std::string GetLayerName(LayerDescription layer);
 
+// For each element in data_view, use |f()| to extract a y-coordinate and
+// store the result in a TimeSeries.
+template <typename DataType, typename IterableType>
+void ProcessPoints(rtc::FunctionView<float(const DataType&)> fx,
+                   rtc::FunctionView<absl::optional<float>(const DataType&)> fy,
+                   const IterableType& data_view,
+                   TimeSeries* result) {
+  for (size_t i = 0; i < data_view.size(); i++) {
+    const DataType& elem = data_view[i];
+    float x = fx(elem);
+    absl::optional<float> y = fy(elem);
+    if (y)
+      result->points.emplace_back(x, *y);
+  }
+}
+
+// For each pair of adjacent elements in |data|, use |f()| to extract a
+// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate
+// will be the time of the second element in the pair.
+template <typename DataType, typename ResultType, typename IterableType>
+void ProcessPairs(
+    rtc::FunctionView<float(const DataType&)> fx,
+    rtc::FunctionView<absl::optional<ResultType>(const DataType&,
+                                                 const DataType&)> fy,
+    const IterableType& data,
+    TimeSeries* result) {
+  for (size_t i = 1; i < data.size(); i++) {
+    float x = fx(data[i]);
+    absl::optional<ResultType> y = fy(data[i - 1], data[i]);
+    if (y)
+      result->points.emplace_back(x, static_cast<float>(*y));
+  }
+}
+
+// For each pair of adjacent elements in |data|, use |fy()| to extract a
+// y-coordinate, accumulate a running sum of the extracted values, and store
+// the sum in a TimeSeries. Note that the x-coordinate will be the time of the
+// second element in the pair.
+template <typename DataType, typename ResultType, typename IterableType>
+void AccumulatePairs(
+    rtc::FunctionView<float(const DataType&)> fx,
+    rtc::FunctionView<absl::optional<ResultType>(const DataType&,
+                                                 const DataType&)> fy,
+    const IterableType& data,
+    TimeSeries* result) {
+  ResultType sum = 0;
+  for (size_t i = 1; i < data.size(); i++) {
+    float x = fx(data[i]);
+    absl::optional<ResultType> y = fy(data[i - 1], data[i]);
+    if (y) {
+      sum += *y;
+      result->points.emplace_back(x, static_cast<float>(sum));
+    }
+  }
+}
+
+// Calculates a moving average of |data_view| and stores the result in a
+// TimeSeries. A data point is generated every |config.step_| microseconds from
+// |config.begin_time_| to |config.end_time_|. The value of each data point is
+// the average of the data during the preceding |config.window_duration_|
+// microseconds.
+template <typename DataType, typename ResultType, typename IterableType>
+void MovingAverage(
+    rtc::FunctionView<absl::optional<ResultType>(const DataType&)> fy,
+    const IterableType& data_view,
+    AnalyzerConfig config,
+    TimeSeries* result) {
+  size_t window_index_begin = 0;
+  size_t window_index_end = 0;
+  ResultType sum_in_window = 0;
+
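+  // Step forward in |config.step_| increments. At each sample time |t|, add
+  // events logged before |t| into the window and drop events older than
+  // |t - config.window_duration_|, updating |sum_in_window| incrementally.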
+  for (int64_t t = config.begin_time_; t < config.end_time_ + config.step_;
+       t += config.step_) {
+    while (window_index_end < data_view.size() &&
+           data_view[window_index_end].log_time_us() < t) {
+      absl::optional<ResultType> value = fy(data_view[window_index_end]);
+      if (value)
+        sum_in_window += *value;
+      ++window_index_end;
+    }
+    while (window_index_begin < data_view.size() &&
+           data_view[window_index_begin].log_time_us() <
+               t - config.window_duration_) {
+      absl::optional<ResultType> value = fy(data_view[window_index_begin]);
+      if (value)
+        sum_in_window -= *value;
+      ++window_index_begin;
+    }
+    float window_duration_s =
+        static_cast<float>(config.window_duration_) / kNumMicrosecsPerSec;
+    float x = config.GetCallTimeSec(t);
+    float y = sum_in_window / window_duration_s;
+    result->points.emplace_back(x, y);
+  }
+}
+
 }  // namespace webrtc
 
 #endif  // RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZER_COMMON_H_
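
The helpers above (ProcessPoints, ProcessPairs, AccumulatePairs, MovingAverage) are the
per-event building blocks that both analyzer.cc and the new analyze_audio.cc compose into
plots. A minimal sketch of the pattern with MovingAverage, assuming a hypothetical
LoggedFoo event type with log_time_us() and size_bytes members and a matching
parsed_log.foo_events() accessor (none of these exist in the tree):

void CreateFooRateGraph(const ParsedRtcEventLog& parsed_log,
                        const AnalyzerConfig& config,
                        Plot* plot) {
  TimeSeries time_series("Foo rate", LineStyle::kLine, PointStyle::kHighlight);
  // Returning absl::nullopt from the extractor skips the event.
  auto GetFooSize = [](const LoggedFoo& event) -> absl::optional<double> {
    return static_cast<double>(event.size_bytes);
  };
  // MovingAverage divides the windowed sum by the window length in seconds,
  // so averaging per-event byte counts yields a rate in bytes per second.
  MovingAverage<LoggedFoo, double>(GetFooSize, parsed_log.foo_events(), config,
                                   &time_series);
  plot->AppendTimeSeries(std::move(time_series));
}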
diff --git a/rtc_tools/rtc_event_log_visualizer/main.cc b/rtc_tools/rtc_event_log_visualizer/main.cc
index 42ee7e1..2aa1653 100644
--- a/rtc_tools/rtc_event_log_visualizer/main.cc
+++ b/rtc_tools/rtc_event_log_visualizer/main.cc
@@ -31,6 +31,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_tools/rtc_event_log_visualizer/alerts.h"
+#include "rtc_tools/rtc_event_log_visualizer/analyze_audio.h"
 #include "rtc_tools/rtc_event_log_visualizer/analyzer.h"
 #include "rtc_tools/rtc_event_log_visualizer/plot_base.h"
 #include "rtc_tools/rtc_event_log_visualizer/plot_protobuf.h"
@@ -436,22 +437,22 @@
   plots.RegisterPlot("pacer_delay",
                      [&](Plot* plot) { analyzer.CreatePacerDelayGraph(plot); });
   plots.RegisterPlot("audio_encoder_bitrate", [&](Plot* plot) {
-    analyzer.CreateAudioEncoderTargetBitrateGraph(plot);
+    CreateAudioEncoderTargetBitrateGraph(parsed_log, config, plot);
   });
   plots.RegisterPlot("audio_encoder_frame_length", [&](Plot* plot) {
-    analyzer.CreateAudioEncoderFrameLengthGraph(plot);
+    CreateAudioEncoderFrameLengthGraph(parsed_log, config, plot);
   });
   plots.RegisterPlot("audio_encoder_packet_loss", [&](Plot* plot) {
-    analyzer.CreateAudioEncoderPacketLossGraph(plot);
+    CreateAudioEncoderPacketLossGraph(parsed_log, config, plot);
   });
   plots.RegisterPlot("audio_encoder_fec", [&](Plot* plot) {
-    analyzer.CreateAudioEncoderEnableFecGraph(plot);
+    CreateAudioEncoderEnableFecGraph(parsed_log, config, plot);
   });
   plots.RegisterPlot("audio_encoder_dtx", [&](Plot* plot) {
-    analyzer.CreateAudioEncoderEnableDtxGraph(plot);
+    CreateAudioEncoderEnableDtxGraph(parsed_log, config, plot);
   });
   plots.RegisterPlot("audio_encoder_num_channels", [&](Plot* plot) {
-    analyzer.CreateAudioEncoderNumChannelsGraph(plot);
+    CreateAudioEncoderNumChannelsGraph(parsed_log, config, plot);
   });
 
   plots.RegisterPlot("ice_candidate_pair_config", [&](Plot* plot) {
@@ -474,14 +475,14 @@
     wav_path = webrtc::test::ResourcePath(
         "audio_processing/conversational_speech/EN_script2_F_sp2_B1", "wav");
   }
-  absl::optional<webrtc::EventLogAnalyzer::NetEqStatsGetterMap> neteq_stats;
+  absl::optional<webrtc::NetEqStatsGetterMap> neteq_stats;
 
   plots.RegisterPlot("simulated_neteq_expand_rate", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqNetworkStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqNetworkStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqNetworkStatistics& stats) {
           return stats.expand_rate / 16384.f;
         },
@@ -490,10 +491,10 @@
 
   plots.RegisterPlot("simulated_neteq_speech_expand_rate", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqNetworkStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqNetworkStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqNetworkStatistics& stats) {
           return stats.speech_expand_rate / 16384.f;
         },
@@ -502,10 +503,10 @@
 
   plots.RegisterPlot("simulated_neteq_accelerate_rate", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqNetworkStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqNetworkStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqNetworkStatistics& stats) {
           return stats.accelerate_rate / 16384.f;
         },
@@ -514,10 +515,10 @@
 
   plots.RegisterPlot("simulated_neteq_preemptive_rate", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqNetworkStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqNetworkStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqNetworkStatistics& stats) {
           return stats.preemptive_rate / 16384.f;
         },
@@ -526,10 +527,10 @@
 
   plots.RegisterPlot("simulated_neteq_packet_loss_rate", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqNetworkStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqNetworkStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqNetworkStatistics& stats) {
           return stats.packet_loss_rate / 16384.f;
         },
@@ -538,10 +539,10 @@
 
   plots.RegisterPlot("simulated_neteq_concealment_events", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqLifetimeStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqLifetimeStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqLifetimeStatistics& stats) {
           return static_cast<float>(stats.concealment_events);
         },
@@ -550,10 +551,10 @@
 
   plots.RegisterPlot("simulated_neteq_preferred_buffer_size", [&](Plot* plot) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    analyzer.CreateNetEqNetworkStatsGraph(
-        *neteq_stats,
+    webrtc::CreateNetEqNetworkStatsGraph(
+        parsed_log, config, *neteq_stats,
         [](const webrtc::NetEqNetworkStatistics& stats) {
           return stats.preferred_buffer_size_ms;
         },
@@ -614,13 +615,13 @@
   if (absl::c_find(plot_flags, "simulated_neteq_jitter_buffer_delay") !=
       plot_flags.end()) {
     if (!neteq_stats) {
-      neteq_stats = analyzer.SimulateNetEq(wav_path, 48000);
+      neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
     }
-    for (webrtc::EventLogAnalyzer::NetEqStatsGetterMap::const_iterator it =
-             neteq_stats->cbegin();
+    for (webrtc::NetEqStatsGetterMap::const_iterator it = neteq_stats->cbegin();
          it != neteq_stats->cend(); ++it) {
-      analyzer.CreateAudioJitterBufferGraph(it->first, it->second.get(),
-                                            collection->AppendNewPlot());
+      webrtc::CreateAudioJitterBufferGraph(parsed_log, config, it->first,
+                                           it->second.get(),
+                                           collection->AppendNewPlot());
     }
   }
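
Since SimulateNetEq and the NetEq graph helpers are now free functions, further simulated
plots can be registered the same way, reusing the lazily initialized neteq_stats map. A
sketch for one more network-stats plot; the current_buffer_size_ms field name is an
assumption used only for illustration:

plots.RegisterPlot("simulated_neteq_current_buffer_size", [&](Plot* plot) {
  if (!neteq_stats) {
    neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000);
  }
  webrtc::CreateNetEqNetworkStatsGraph(
      parsed_log, config, *neteq_stats,
      [](const webrtc::NetEqNetworkStatistics& stats) {
        return static_cast<float>(stats.current_buffer_size_ms);
      },
      "Current buffer size (ms)", plot);
});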