Switch to new FFmpeg API.

Deprecated avcodec_decode_video2 is replaced with
avcodec_send_packet and avcodec_receive_frame.
https://www.ffmpeg.org/doxygen/3.1/group__lavc__encdec.html
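
A minimal sketch of the new send/receive decode flow (hypothetical
variable names, not the exact code in this change):

  AVPacket packet;
  av_init_packet(&packet);
  packet.data = encoded_data;   // assumed input buffer
  packet.size = encoded_size;

  int err = avcodec_send_packet(context, &packet);
  if (err < 0) {
    // Feeding the packet failed; report an error.
  }
  // One packet may produce zero or more frames; drain until the
  // decoder asks for more input (AVERROR(EAGAIN)).
  while (avcodec_receive_frame(context, frame) == 0) {
    // Consume |frame|.
  }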

Setting av_context_->refcounted_frames=1 is removed; reference
counting is enabled automatically when avcodec_receive_frame is used.
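
Frames returned by avcodec_receive_frame are always reference
counted, so no context flag is needed; the caller just drops its
reference when done (sketch, assuming |context| as above):

  AVFrame* frame = av_frame_alloc();
  if (avcodec_receive_frame(context, frame) == 0) {
    // ... use frame->data / frame->buf ...
    av_frame_unref(frame);  // release this reference
  }
  av_frame_free(&frame);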

Bug: webrtc:8493
Change-Id: Id1d2b1315717f4553fb7fc182197f9429bd31daf
Reviewed-on: https://webrtc-review.googlesource.com/18462
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20601}
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index 4506d8a..f6355c5 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -239,8 +239,6 @@
   // |get_buffer2| is called with the context, where |opaque| can be used to
   // get a pointer to |this|.
   av_context_->opaque = this;
-  // Use ref counted frames (av_frame_unref).
-  av_context_->refcounted_frames = 1;  // true
 
   AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
   if (!codec) {
@@ -320,32 +318,27 @@
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   packet.size = static_cast<int>(input_image._length);
-  av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000;  // ms -> μs
+  int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs
+  av_context_->reordered_opaque = frame_timestamp_us;
 
-  int frame_decoded = 0;
-  int result = avcodec_decode_video2(av_context_.get(),
-                                     av_frame_.get(),
-                                     &frame_decoded,
-                                     &packet);
+  int result = avcodec_send_packet(av_context_.get(), &packet);
   if (result < 0) {
-    LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result;
-    ReportError();
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-  // |result| is number of bytes used, which should be all of them.
-  if (result != packet.size) {
-    LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes "
-        "when " << packet.size << " bytes were expected.";
+    LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
 
-  if (!frame_decoded) {
-    LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was "
-        "decoded.";
-    return WEBRTC_VIDEO_CODEC_OK;
+  result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
+  if (result < 0) {
+    LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
+    ReportError();
+    return WEBRTC_VIDEO_CODEC_ERROR;
   }
 
+  // We don't expect reordering. The decoded frame's timestamp should match
+  // the input timestamp.
+  RTC_DCHECK_EQ(av_frame_->reordered_opaque, frame_timestamp_us);
+
   // Obtain the |video_frame| containing the decoded image.
   VideoFrame* video_frame = static_cast<VideoFrame*>(
       av_buffer_get_opaque(av_frame_->buf[0]));