Add accessor methods for RTP timestamp of EncodedImage.
The intention is to make the member private, but downstream callers
must first be updated to use the accessor methods.
Bug: webrtc:9378
Change-Id: I3495bd8d545b7234fbea10abfd14f082caa420b6
Reviewed-on: https://webrtc-review.googlesource.com/82160
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24352}
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index ae3de06..73d4583 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -304,7 +304,7 @@
VideoFrame::Builder()
.set_video_frame_buffer(input_frame->video_frame_buffer())
.set_timestamp_us(input_frame->timestamp_us())
- .set_timestamp_rtp(input_image._timeStamp)
+ .set_timestamp_rtp(input_image.Timestamp())
.set_rotation(input_frame->rotation())
.set_color_space(color_space)
.build();
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 0b84e3a..4fc9b4f 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -496,7 +496,7 @@
encoded_images_[i]._encodedWidth = configurations_[i].width;
encoded_images_[i]._encodedHeight = configurations_[i].height;
- encoded_images_[i]._timeStamp = input_frame.timestamp();
+ encoded_images_[i].SetTimestamp(input_frame.timestamp());
encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
encoded_images_[i].rotation_ = input_frame.rotation();
diff --git a/modules/video_coding/codecs/i420/i420.cc b/modules/video_coding/codecs/i420/i420.cc
index 565a39e..7c498b1 100644
--- a/modules/video_coding/codecs/i420/i420.cc
+++ b/modules/video_coding/codecs/i420/i420.cc
@@ -84,7 +84,7 @@
}
_encodedImage._frameType = kVideoFrameKey;
- _encodedImage._timeStamp = inputImage.timestamp();
+ _encodedImage.SetTimestamp(inputImage.timestamp());
_encodedImage._encodedHeight = inputImage.height();
_encodedImage._encodedWidth = inputImage.width();
@@ -200,7 +200,7 @@
return WEBRTC_VIDEO_CODEC_MEMORY;
}
- VideoFrame decoded_image(frame_buffer, inputImage._timeStamp, 0,
+ VideoFrame decoded_image(frame_buffer, inputImage.Timestamp(), 0,
webrtc::kVideoRotation_0);
_decodeCompleteCallback->Decoded(decoded_image);
return WEBRTC_VIDEO_CODEC_OK;
diff --git a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
index e0d0618..1a775af 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -139,10 +139,10 @@
}
if (image.component_count == 1) {
- RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
+ RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==
decoded_data_.end());
decoded_data_.emplace(std::piecewise_construct,
- std::forward_as_tuple(input_image._timeStamp),
+ std::forward_as_tuple(input_image.Timestamp()),
std::forward_as_tuple(kAXXStream));
}
int32_t rv = 0;
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
index 63cd6da..fd316cf 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -258,7 +258,7 @@
image_component.codec_type = frame_headers[i].codec_type;
EncodedImage encoded_image = combined_image;
- encoded_image._timeStamp = combined_image._timeStamp;
+ encoded_image.SetTimestamp(combined_image.Timestamp());
encoded_image._frameType = frame_headers[i].frame_type;
encoded_image._size =
static_cast<size_t>(frame_headers[i].bitstream_length);
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
index 6f921e3..4733b3a 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -258,7 +258,8 @@
encodedImage._length);
rtc::CritScope cs(&crit_);
- const auto& stashed_image_itr = stashed_images_.find(encodedImage._timeStamp);
+ const auto& stashed_image_itr =
+ stashed_images_.find(encodedImage.Timestamp());
const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
RTC_DCHECK(stashed_image_itr != stashed_images_.end());
MultiplexImage& stashed_image = stashed_image_itr->second;
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index 5a1269c..a60ddf4 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -352,7 +352,7 @@
GetLayerIndices(codec_specific, &spatial_idx, &temporal_idx);
FrameStatistics* frame_stat =
- stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
const size_t frame_number = frame_stat->frame_number;
// Ensure that the encode order is monotonically increasing, within this
@@ -428,7 +428,7 @@
if (!layer_dropped) {
base_image = &merged_encoded_frames_[i];
base_stat =
- stats_->GetFrameWithTimestamp(encoded_image._timeStamp, i);
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), i);
} else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
DecodeFrame(*base_image, i);
}
@@ -526,7 +526,7 @@
size_t spatial_idx) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
FrameStatistics* frame_stat =
- stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
+ stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code =
@@ -551,7 +551,7 @@
for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
--base_idx) {
EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
- if (lower_layer._timeStamp == encoded_image._timeStamp) {
+ if (lower_layer.Timestamp() == encoded_image.Timestamp()) {
base_image = lower_layer;
break;
}
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index 49959ef..298a593 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -254,7 +254,7 @@
vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
- ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
+ ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 95abc5c..3826e14 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -881,7 +881,7 @@
break;
}
}
- encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
+ encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
encoded_images_[encoder_idx].capture_time_ms_ =
input_image.render_time_ms();
encoded_images_[encoder_idx].rotation_ = input_image.rotation();
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 116d06c..be92b34 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -136,7 +136,7 @@
CodecSpecificInfo codec_specific_info;
EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
- EXPECT_EQ(kInitialTimestampRtp, encoded_frame._timeStamp);
+ EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index bdae580..e6df457 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -1009,7 +1009,7 @@
}
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
- encoded_image_._timeStamp = input_image_->timestamp();
+ encoded_image_.SetTimestamp(input_image_->timestamp());
encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
encoded_image_.rotation_ = input_image_->rotation();
encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
@@ -1046,9 +1046,9 @@
if (end_of_picture) {
const uint32_t timestamp_ms =
- 1000 * encoded_image_._timeStamp / kVideoPayloadTypeFrequency;
+ 1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
output_framerate_.Update(1, timestamp_ms);
- last_encoded_frame_rtp_timestamp_ = encoded_image_._timeStamp;
+ last_encoded_frame_rtp_timestamp_ = encoded_image_.Timestamp();
}
}
}
@@ -1190,7 +1190,7 @@
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
int ret =
- ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
+ ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
if (ret != 0) {
return ret;
}
diff --git a/modules/video_coding/decoding_state.cc b/modules/video_coding/decoding_state.cc
index 23bf668..1d54063 100644
--- a/modules/video_coding/decoding_state.cc
+++ b/modules/video_coding/decoding_state.cc
@@ -58,7 +58,7 @@
assert(frame != NULL);
if (in_initial_state_)
return false;
- return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
+ return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
}
bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
@@ -73,7 +73,7 @@
if (!UsingFlexibleMode(frame))
UpdateSyncState(frame);
sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
- time_stamp_ = frame->TimeStamp();
+ time_stamp_ = frame->Timestamp();
picture_id_ = frame->PictureId();
temporal_id_ = frame->TemporalId();
tl0_pic_id_ = frame->Tl0PicId();
@@ -143,7 +143,7 @@
// Continuous empty packets or continuous frames can be dropped if we
// advance the sequence number.
sequence_num_ = frame->GetHighSeqNum();
- time_stamp_ = frame->TimeStamp();
+ time_stamp_ = frame->Timestamp();
return true;
}
return false;
diff --git a/modules/video_coding/encoded_frame.cc b/modules/video_coding/encoded_frame.cc
index 91ee871..c53a737 100644
--- a/modules/video_coding/encoded_frame.cc
+++ b/modules/video_coding/encoded_frame.cc
@@ -35,8 +35,8 @@
}
void VCMEncodedFrame::Reset() {
+ SetTimestamp(0);
_renderTimeMs = -1;
- _timeStamp = 0;
_payloadType = 0;
_frameType = kVideoFrameDelta;
_encodedWidth = 0;
diff --git a/modules/video_coding/encoded_frame.h b/modules/video_coding/encoded_frame.h
index 252dea4..a08eb07 100644
--- a/modules/video_coding/encoded_frame.h
+++ b/modules/video_coding/encoded_frame.h
@@ -64,10 +64,12 @@
* Get frame length
*/
size_t Length() const { return _length; }
+
/**
- * Get frame timestamp (90kHz)
+ * Frame RTP timestamp (90kHz)
*/
- uint32_t TimeStamp() const { return _timeStamp; }
+ using EncodedImage::Timestamp;
+ using EncodedImage::SetTimestamp;
/**
* Get render time in milliseconds
*/
diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc
index b9241b9..9c2819e 100644
--- a/modules/video_coding/frame_buffer.cc
+++ b/modules/video_coding/frame_buffer.cc
@@ -87,7 +87,7 @@
if (kStateEmpty == _state) {
// First packet (empty and/or media) inserted into this frame.
// store some info and set some initial values.
- _timeStamp = packet.timestamp;
+ SetTimestamp(packet.timestamp);
// We only take the ntp timestamp of the first packet of a frame.
ntp_time_ms_ = packet.ntp_time_ms_;
_codec = packet.codec;
@@ -213,7 +213,6 @@
void VCMFrameBuffer::Reset() {
TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
_length = 0;
- _timeStamp = 0;
_sessionInfo.Reset();
_payloadType = 0;
_nackCount = 0;
diff --git a/modules/video_coding/frame_buffer2.cc b/modules/video_coding/frame_buffer2.cc
index f13ed35..8bb241a 100644
--- a/modules/video_coding/frame_buffer2.cc
+++ b/modules/video_coding/frame_buffer2.cc
@@ -117,7 +117,8 @@
next_frame_it_ = frame_it;
if (frame->RenderTime() == -1)
- frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+ frame->SetRenderTime(
+ timing_->RenderTimeMs(frame->Timestamp(), now_ms));
wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms);
// This will cause the frame buffer to prefer high framerate rather
@@ -146,7 +147,7 @@
if (!frame->delayed_by_retransmission()) {
int64_t frame_delay;
- if (inter_frame_delay_.CalculateDelay(frame->timestamp, &frame_delay,
+ if (inter_frame_delay_.CalculateDelay(frame->Timestamp(), &frame_delay,
frame->ReceivedTime())) {
jitter_estimator_->UpdateEstimate(frame_delay, frame->size());
}
@@ -163,7 +164,7 @@
if (HasBadRenderTiming(*frame, now_ms)) {
jitter_estimator_->Reset();
timing_->Reset();
- frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+ frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms));
}
UpdateJitterDelay();
@@ -177,17 +178,17 @@
const VideoLayerFrameId& frame_key = next_frame_it_->first;
const bool frame_is_higher_spatial_layer_of_last_decoded_frame =
- last_decoded_frame_timestamp_ == frame->timestamp &&
+ last_decoded_frame_timestamp_ == frame->Timestamp() &&
last_decoded_frame_key.picture_id == frame_key.picture_id &&
last_decoded_frame_key.spatial_layer < frame_key.spatial_layer;
- if (AheadOrAt(last_decoded_frame_timestamp_, frame->timestamp) &&
+ if (AheadOrAt(last_decoded_frame_timestamp_, frame->Timestamp()) &&
!frame_is_higher_spatial_layer_of_last_decoded_frame) {
// TODO(brandtr): Consider clearing the entire buffer when we hit
// these conditions.
RTC_LOG(LS_WARNING)
<< "Frame with (timestamp:picture_id:spatial_id) ("
- << frame->timestamp << ":" << frame->id.picture_id << ":"
+ << frame->Timestamp() << ":" << frame->id.picture_id << ":"
<< static_cast<int>(frame->id.spatial_layer) << ")"
<< " sent to decoder after frame with"
<< " (timestamp:picture_id:spatial_id) ("
@@ -198,7 +199,7 @@
}
AdvanceLastDecodedFrame(next_frame_it_);
- last_decoded_frame_timestamp_ = frame->timestamp;
+ last_decoded_frame_timestamp_ = frame->Timestamp();
*frame_out = std::move(frame);
return kFrameFound;
}
@@ -297,7 +298,7 @@
timing_->set_max_playout_delay(playout_delay.max_ms);
if (!frame.delayed_by_retransmission())
- timing_->IncomingTimestamp(frame.timestamp, frame.ReceivedTime());
+ timing_->IncomingTimestamp(frame.Timestamp(), frame.ReceivedTime());
}
int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
@@ -343,7 +344,7 @@
if (last_decoded_frame_it_ != frames_.end() &&
id <= last_decoded_frame_it_->first) {
- if (AheadOf(frame->timestamp, last_decoded_frame_timestamp_) &&
+ if (AheadOf(frame->Timestamp(), last_decoded_frame_timestamp_) &&
frame->is_keyframe()) {
// If this frame has a newer timestamp but an earlier picture id then we
// assume there has been a jump in the picture id due to some encoder
diff --git a/modules/video_coding/frame_buffer2_unittest.cc b/modules/video_coding/frame_buffer2_unittest.cc
index 1378be5..357ba86 100644
--- a/modules/video_coding/frame_buffer2_unittest.cc
+++ b/modules/video_coding/frame_buffer2_unittest.cc
@@ -90,8 +90,6 @@
public:
bool GetBitstream(uint8_t* destination) const override { return true; }
- uint32_t Timestamp() const override { return timestamp; }
-
int64_t ReceivedTime() const override { return 0; }
int64_t RenderTime() const override { return _renderTimeMs; }
@@ -165,7 +163,7 @@
std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
frame->id.picture_id = picture_id;
frame->id.spatial_layer = spatial_layer;
- frame->timestamp = ts_ms * 90;
+ frame->SetTimestamp(ts_ms * 90);
frame->num_references = references.size();
frame->inter_layer_predicted = inter_layer_predicted;
for (size_t r = 0; r < references.size(); ++r)
@@ -520,7 +518,7 @@
frame->SetSize(kFrameSize);
frame->id.picture_id = pid;
frame->id.spatial_layer = 0;
- frame->timestamp = ts;
+ frame->SetTimestamp(ts);
frame->num_references = 0;
frame->inter_layer_predicted = false;
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index b990cdd..57c60ca 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -26,7 +26,6 @@
: packet_buffer_(packet_buffer),
first_seq_num_(first_seq_num),
last_seq_num_(last_seq_num),
- timestamp_(0),
received_time_(received_time),
times_nacked_(times_nacked) {
VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);
@@ -41,7 +40,7 @@
CopyCodecSpecific(&first_packet->video_header);
_completeFrame = true;
_payloadType = first_packet->payloadType;
- _timeStamp = first_packet->timestamp;
+ SetTimestamp(first_packet->timestamp);
ntp_time_ms_ = first_packet->ntp_time_ms_;
_frameType = first_packet->frameType;
@@ -69,7 +68,7 @@
_encodedHeight = first_packet->height;
// EncodedFrame members
- timestamp = first_packet->timestamp;
+ SetTimestamp(first_packet->timestamp);
VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
RTC_CHECK(last_packet);
@@ -140,10 +139,6 @@
return packet_buffer_->GetBitstream(*this, destination);
}
-uint32_t RtpFrameObject::Timestamp() const {
- return timestamp_;
-}
-
int64_t RtpFrameObject::ReceivedTime() const {
return received_time_;
}
diff --git a/modules/video_coding/frame_object.h b/modules/video_coding/frame_object.h
index 8980984..6c6480a 100644
--- a/modules/video_coding/frame_object.h
+++ b/modules/video_coding/frame_object.h
@@ -37,7 +37,6 @@
enum FrameType frame_type() const;
VideoCodecType codec_type() const;
bool GetBitstream(uint8_t* destination) const override;
- uint32_t Timestamp() const override;
int64_t ReceivedTime() const override;
int64_t RenderTime() const override;
bool delayed_by_retransmission() const override;
@@ -49,7 +48,6 @@
VideoCodecType codec_type_;
uint16_t first_seq_num_;
uint16_t last_seq_num_;
- uint32_t timestamp_;
int64_t received_time_;
// Equal to times nacked of the packet with the highet times nacked
diff --git a/modules/video_coding/generic_decoder.cc b/modules/video_coding/generic_decoder.cc
index eb16400..92e53da 100644
--- a/modules/video_coding/generic_decoder.cc
+++ b/modules/video_coding/generic_decoder.cc
@@ -211,7 +211,7 @@
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
- frame.EncodedImage()._timeStamp);
+ frame.Timestamp());
_frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
_frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
_frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
@@ -225,7 +225,7 @@
} else {
_frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
}
- _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+ _callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
_nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
@@ -234,13 +234,13 @@
_callback->OnDecoderImplementationName(decoder_->ImplementationName());
if (ret < WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
- << frame.TimeStamp() << ", error code: " << ret;
- _callback->Pop(frame.TimeStamp());
+ << frame.Timestamp() << ", error code: " << ret;
+ _callback->Pop(frame.Timestamp());
return ret;
} else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
// No output
- _callback->Pop(frame.TimeStamp());
+ _callback->Pop(frame.Timestamp());
}
return ret;
}
diff --git a/modules/video_coding/generic_encoder.cc b/modules/video_coding/generic_encoder.cc
index 5cea0a5..a8999fc 100644
--- a/modules/video_coding/generic_encoder.cc
+++ b/modules/video_coding/generic_encoder.cc
@@ -265,13 +265,14 @@
// Because some hardware encoders don't preserve capture timestamp we
// use RTP timestamps here.
while (!encode_start_list->empty() &&
- IsNewerTimestamp(encoded_image->_timeStamp,
+ IsNewerTimestamp(encoded_image->Timestamp(),
encode_start_list->front().rtp_timestamp)) {
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
encode_start_list->pop_front();
}
if (encode_start_list->size() > 0 &&
- encode_start_list->front().rtp_timestamp == encoded_image->_timeStamp) {
+ encode_start_list->front().rtp_timestamp ==
+ encoded_image->Timestamp()) {
result.emplace(encode_start_list->front().encode_start_time_ms);
if (encoded_image->capture_time_ms_ !=
encode_start_list->front().capture_time_ms) {
@@ -365,8 +366,8 @@
int64_t clock_offset_ms = now_ms - encoded_image->timing_.encode_finish_ms;
// Translate capture timestamp to local WebRTC clock.
encoded_image->capture_time_ms_ += clock_offset_ms;
- encoded_image->_timeStamp =
- static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90);
+ encoded_image->SetTimestamp(
+ static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90));
encode_start_ms.emplace(encoded_image->timing_.encode_start_ms +
clock_offset_ms);
}
@@ -389,7 +390,7 @@
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
- "timestamp", encoded_image._timeStamp);
+ "timestamp", encoded_image.Timestamp());
size_t simulcast_svc_idx = 0;
if (codec_specific->codecType == kVideoCodecVP9) {
if (codec_specific->codecSpecific.VP9.num_spatial_layers > 1)
diff --git a/modules/video_coding/generic_encoder_unittest.cc b/modules/video_coding/generic_encoder_unittest.cc
index 6037381..c889769 100644
--- a/modules/video_coding/generic_encoder_unittest.cc
+++ b/modules/video_coding/generic_encoder_unittest.cc
@@ -94,7 +94,7 @@
CodecSpecificInfo codec_specific;
image._length = FrameSize(min_frame_size, max_frame_size, s, i);
image.capture_time_ms_ = current_timestamp;
- image._timeStamp = static_cast<uint32_t>(current_timestamp * 90);
+ image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = s;
callback.OnEncodeStarted(static_cast<uint32_t>(current_timestamp * 90),
@@ -187,7 +187,7 @@
int64_t timestamp = 1;
image._length = 500;
image.capture_time_ms_ = timestamp;
- image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+ image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = 0;
FakeEncodedImageCallback sink;
@@ -204,7 +204,7 @@
// New frame, now skip OnEncodeStarted. Should not result in timing frame.
image.capture_time_ms_ = ++timestamp;
- image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+ image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_FALSE(sink.WasTimingFrame());
}
@@ -219,7 +219,7 @@
int64_t timestamp = 1;
image._length = 500;
image.capture_time_ms_ = timestamp;
- image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+ image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = 0;
FakeEncodedImageCallback sink;
@@ -237,7 +237,7 @@
// New frame, but this time with encode timestamps set in timing_.
// This should be a timing frame.
image.capture_time_ms_ = ++timestamp;
- image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+ image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
image.timing_.encode_start_ms = timestamp + kEncodeStartDelayMs;
image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs;
callback.OnEncodedImage(image, &codec_specific, nullptr);
@@ -263,27 +263,27 @@
// Any non-zero bitrate needed to be set before the first frame.
callback.OnTargetBitrateChanged(500, 0);
image.capture_time_ms_ = kTimestampMs1;
- image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
- callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
EXPECT_EQ(0u, sink.GetNumFramesDropped());
callback.OnEncodedImage(image, &codec_specific, nullptr);
image.capture_time_ms_ = kTimestampMs2;
- image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
- callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
// No OnEncodedImageCall for timestamp2. Yet, at this moment it's not known
// that frame with timestamp2 was dropped.
EXPECT_EQ(0u, sink.GetNumFramesDropped());
image.capture_time_ms_ = kTimestampMs3;
- image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
- callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
image.capture_time_ms_ = kTimestampMs4;
- image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
- callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
}
@@ -299,8 +299,8 @@
// Any non-zero bitrate needed to be set before the first frame.
callback.OnTargetBitrateChanged(500, 0);
image.capture_time_ms_ = kTimestampMs; // Incorrect timesetamp.
- image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
- callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+ image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+ callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
image.capture_time_ms_ = 0; // Incorrect timesetamp.
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(kTimestampMs, sink.GetLastCaptureTimestamp());
diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc
index 03a9845..4a0622a 100644
--- a/modules/video_coding/jitter_buffer.cc
+++ b/modules/video_coding/jitter_buffer.cc
@@ -54,7 +54,7 @@
}
void FrameList::InsertFrame(VCMFrameBuffer* frame) {
- insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
+ insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
}
VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
@@ -110,7 +110,7 @@
}
free_frames->push_back(oldest_frame);
TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
- oldest_frame->TimeStamp());
+ oldest_frame->Timestamp());
erase(begin());
}
}
@@ -212,7 +212,7 @@
continue;
}
SsMap::iterator ss_it;
- if (Find(frame_it.second->TimeStamp(), &ss_it)) {
+ if (Find(frame_it.second->Timestamp(), &ss_it)) {
if (gof_idx >= ss_it->second.num_frames_in_gof) {
continue; // Assume corresponding SS not yet received.
}
@@ -528,7 +528,7 @@
}
}
- *timestamp = oldest_frame->TimeStamp();
+ *timestamp = oldest_frame->Timestamp();
return true;
}
@@ -564,7 +564,7 @@
// Wait for this one to get complete.
waiting_for_completion_.frame_size = frame->Length();
waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
- waiting_for_completion_.timestamp = frame->TimeStamp();
+ waiting_for_completion_.timestamp = frame->Timestamp();
}
}
@@ -715,8 +715,8 @@
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
if (previous_state != kStateComplete) {
- TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
- frame->TimeStamp());
+ TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->Timestamp(), "timestamp",
+ frame->Timestamp());
}
if (buffer_state > 0) {
@@ -831,7 +831,7 @@
for (FrameList::const_iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = it->second;
- if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
+ if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
break;
}
decoding_state.SetState(decodable_frame);
@@ -865,7 +865,7 @@
it != incomplete_frames_.end();) {
VCMFrameBuffer* frame = it->second;
if (IsNewerTimestamp(original_decoded_state.time_stamp(),
- frame->TimeStamp())) {
+ frame->Timestamp())) {
++it;
continue;
}
@@ -947,11 +947,11 @@
if (incomplete_frames_.empty()) {
return 0;
}
- uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp();
+ uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
if (!decodable_frames_.empty()) {
- start_timestamp = decodable_frames_.Back()->TimeStamp();
+ start_timestamp = decodable_frames_.Back()->Timestamp();
}
- return incomplete_frames_.Back()->TimeStamp() - start_timestamp;
+ return incomplete_frames_.Back()->Timestamp() - start_timestamp;
}
uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
@@ -1184,10 +1184,10 @@
incoming_frame_count_++;
if (frame.FrameType() == kVideoFrameKey) {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
"KeyComplete");
} else {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
"DeltaComplete");
}
@@ -1263,7 +1263,7 @@
}
// No retransmitted frames should be a part of the jitter
// estimate.
- UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
+ UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
frame.Length(), incomplete_frame);
}
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc
index d1744d5..e38a7eb 100644
--- a/modules/video_coding/jitter_buffer_unittest.cc
+++ b/modules/video_coding/jitter_buffer_unittest.cc
@@ -259,7 +259,7 @@
VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
if (!found_frame)
return nullptr;
- return jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
+ return jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
}
VCMEncodedFrame* DecodeIncompleteFrame() {
@@ -414,7 +414,7 @@
return false;
VCMEncodedFrame* frame =
- jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
+ jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
bool ret = (frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
return ret;
@@ -964,12 +964,12 @@
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(1000U, frame_out->TimeStamp());
+ EXPECT_EQ(1000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
- EXPECT_EQ(13000U, frame_out->TimeStamp());
+ EXPECT_EQ(13000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
}
@@ -1029,7 +1029,7 @@
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(3000U, frame_out->TimeStamp());
+ EXPECT_EQ(3000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_FALSE(
@@ -1037,14 +1037,14 @@
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
- EXPECT_EQ(6000U, frame_out->TimeStamp());
+ EXPECT_EQ(6000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
- EXPECT_EQ(9000U, frame_out->TimeStamp());
+ EXPECT_EQ(9000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@@ -1123,7 +1123,7 @@
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(3000U, frame_out->TimeStamp());
+ EXPECT_EQ(3000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_FALSE(
@@ -1131,7 +1131,7 @@
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
- EXPECT_EQ(6000U, frame_out->TimeStamp());
+ EXPECT_EQ(6000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@@ -1481,8 +1481,8 @@
uint32_t next_timestamp;
VCMEncodedFrame* frame = jitter_buffer_->NextCompleteFrame(0);
EXPECT_NE(frame, nullptr);
- EXPECT_EQ(packet_->timestamp, frame->TimeStamp());
- frame = jitter_buffer_->ExtractAndSetDecode(frame->TimeStamp());
+ EXPECT_EQ(packet_->timestamp, frame->Timestamp());
+ frame = jitter_buffer_->ExtractAndSetDecode(frame->Timestamp());
EXPECT_TRUE(frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
@@ -1728,7 +1728,7 @@
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(3000u, frame_out->TimeStamp());
+ EXPECT_EQ(3000u, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
@@ -1763,7 +1763,7 @@
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(timestamp_, frame_out->TimeStamp());
+ EXPECT_EQ(timestamp_, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
@@ -1873,13 +1873,13 @@
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+ EXPECT_EQ(0xffffff00, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
- EXPECT_EQ(2700u, frame_out2->TimeStamp());
+ EXPECT_EQ(2700u, frame_out2->Timestamp());
CheckOutFrame(frame_out2, size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
jitter_buffer_->ReleaseFrame(frame_out2);
@@ -1916,13 +1916,13 @@
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+ EXPECT_EQ(0xffffff00, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
- EXPECT_EQ(2700u, frame_out2->TimeStamp());
+ EXPECT_EQ(2700u, frame_out2->Timestamp());
CheckOutFrame(frame_out2, size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
jitter_buffer_->ReleaseFrame(frame_out2);
@@ -2017,7 +2017,7 @@
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
- EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
+ EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
@@ -2043,7 +2043,7 @@
VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
// Timestamp should never be the last TS inserted.
if (testFrame != NULL) {
- EXPECT_TRUE(testFrame->TimeStamp() < timestamp_);
+ EXPECT_TRUE(testFrame->Timestamp() < timestamp_);
jitter_buffer_->ReleaseFrame(testFrame);
}
}
diff --git a/modules/video_coding/receiver.cc b/modules/video_coding/receiver.cc
index 4c56d12..7f22b73 100644
--- a/modules/video_coding/receiver.cc
+++ b/modules/video_coding/receiver.cc
@@ -140,7 +140,7 @@
jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
if (found_frame) {
- frame_timestamp = found_frame->TimeStamp();
+ frame_timestamp = found_frame->Timestamp();
min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
} else {
@@ -212,7 +212,7 @@
return NULL;
}
frame->SetRenderTime(render_time_ms);
- TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
"render_time", frame->RenderTimeMs());
if (!frame->Complete()) {
// Update stats for incomplete frames.
diff --git a/modules/video_coding/utility/ivf_file_writer.cc b/modules/video_coding/utility/ivf_file_writer.cc
index 454aeb0..fcf2dda 100644
--- a/modules/video_coding/utility/ivf_file_writer.cc
+++ b/modules/video_coding/utility/ivf_file_writer.cc
@@ -115,7 +115,7 @@
height_ = encoded_image._encodedHeight;
RTC_CHECK_GT(width_, 0);
RTC_CHECK_GT(height_, 0);
- using_capture_timestamps_ = encoded_image._timeStamp == 0;
+ using_capture_timestamps_ = encoded_image.Timestamp() == 0;
codec_type_ = codec_type;
@@ -151,7 +151,7 @@
int64_t timestamp = using_capture_timestamps_
? encoded_image.capture_time_ms_
- : wrap_handler_.Unwrap(encoded_image._timeStamp);
+ : wrap_handler_.Unwrap(encoded_image.Timestamp());
if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
RTC_LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_
<< " -> " << timestamp;
diff --git a/modules/video_coding/utility/ivf_file_writer_unittest.cc b/modules/video_coding/utility/ivf_file_writer_unittest.cc
index 2172b00..c287920 100644
--- a/modules/video_coding/utility/ivf_file_writer_unittest.cc
+++ b/modules/video_coding/utility/ivf_file_writer_unittest.cc
@@ -50,7 +50,7 @@
if (use_capture_tims_ms) {
frame.capture_time_ms_ = i;
} else {
- frame._timeStamp = i;
+ frame.SetTimestamp(i);
}
if (!file_writer_->WriteFrame(frame, codec_type))
return false;
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index e81c2f4..03de176 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -109,7 +109,7 @@
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.temporalIdx;
}
- return Result(Result::OK, encoded_image._timeStamp);
+ return Result(Result::OK, encoded_image.Timestamp());
}
// This method only makes sense for VP8.
void GetLastEncodedFrameInfo(int* temporal_layer,
diff --git a/modules/video_coding/video_sender_unittest.cc b/modules/video_coding/video_sender_unittest.cc
index c391510..5c7f32d 100644
--- a/modules/video_coding/video_sender_unittest.cc
+++ b/modules/video_coding/video_sender_unittest.cc
@@ -100,7 +100,7 @@
assert(codec_specific_info);
frame_data_.push_back(
FrameData(encoded_image._length, *codec_specific_info));
- return Result(Result::OK, encoded_image._timeStamp);
+ return Result(Result::OK, encoded_image.Timestamp());
}
void Reset() {