Add ParsedPayload::video_header() accessor.
This is a preparation CL for the removal of RTPTypeHeader: call sites are migrated from direct `type.Video` access to the new accessor so the union member can later be replaced without touching callers again.
Bug: none
Change-Id: I695acf20082b94744a2f6c7692f1b2128932cd79
Reviewed-on: https://webrtc-review.googlesource.com/86132
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23835}
diff --git a/modules/rtp_rtcp/source/rtp_format.h b/modules/rtp_rtcp/source/rtp_format.h
index df35276..e702270 100644
--- a/modules/rtp_rtcp/source/rtp_format.h
+++ b/modules/rtp_rtcp/source/rtp_format.h
@@ -55,6 +55,8 @@
const uint8_t* payload;
size_t payload_length;
FrameType frame_type;
+ RTPVideoHeader& video_header() { return type.Video; }
+ const RTPVideoHeader& video_header() const { return type.Video; }
RTPTypeHeader type;
};
diff --git a/modules/rtp_rtcp/source/rtp_format_h264.cc b/modules/rtp_rtcp/source/rtp_format_h264.cc
index c8c6f5f..adb1c15 100644
--- a/modules/rtp_rtcp/source/rtp_format_h264.cc
+++ b/modules/rtp_rtcp/source/rtp_format_h264.cc
@@ -429,7 +429,7 @@
modified_buffer_.reset();
uint8_t nal_type = payload_data[0] & kTypeMask;
- parsed_payload->type.Video.codecHeader.H264.nalus_length = 0;
+ parsed_payload->video_header().codecHeader.H264.nalus_length = 0;
if (nal_type == H264::NaluType::kFuA) {
// Fragmented NAL units (FU-A).
if (!ParseFuaNalu(parsed_payload, payload_data))
@@ -453,13 +453,13 @@
bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
ParsedPayload* parsed_payload,
const uint8_t* payload_data) {
- parsed_payload->type.Video.width = 0;
- parsed_payload->type.Video.height = 0;
- parsed_payload->type.Video.codec = kVideoCodecH264;
- parsed_payload->type.Video.simulcastIdx = 0;
- parsed_payload->type.Video.is_first_packet_in_frame = true;
+ parsed_payload->video_header().width = 0;
+ parsed_payload->video_header().height = 0;
+ parsed_payload->video_header().codec = kVideoCodecH264;
+ parsed_payload->video_header().simulcastIdx = 0;
+ parsed_payload->video_header().is_first_packet_in_frame = true;
RTPVideoHeaderH264* h264_header =
- &parsed_payload->type.Video.codecHeader.H264;
+ &parsed_payload->video_header().codecHeader.H264;
const uint8_t* nalu_start = payload_data + kNalHeaderSize;
const size_t nalu_length = length_ - kNalHeaderSize;
@@ -569,8 +569,8 @@
}
if (sps) {
- parsed_payload->type.Video.width = sps->width;
- parsed_payload->type.Video.height = sps->height;
+ parsed_payload->video_header().width = sps->width;
+ parsed_payload->video_header().height = sps->height;
nalu.sps_id = sps->id;
} else {
RTC_LOG(LS_WARNING) << "Failed to parse SPS id from SPS slice.";
@@ -618,7 +618,7 @@
RTC_LOG(LS_WARNING) << "Unexpected STAP-A or FU-A received.";
return false;
}
- RTPVideoHeaderH264* h264 = &parsed_payload->type.Video.codecHeader.H264;
+ RTPVideoHeaderH264* h264 = &parsed_payload->video_header().codecHeader.H264;
if (h264->nalus_length == kMaxNalusPerPacket) {
RTC_LOG(LS_WARNING)
<< "Received packet containing more than " << kMaxNalusPerPacket
@@ -672,12 +672,12 @@
} else {
parsed_payload->frame_type = kVideoFrameDelta;
}
- parsed_payload->type.Video.width = 0;
- parsed_payload->type.Video.height = 0;
- parsed_payload->type.Video.codec = kVideoCodecH264;
- parsed_payload->type.Video.simulcastIdx = 0;
- parsed_payload->type.Video.is_first_packet_in_frame = first_fragment;
- RTPVideoHeaderH264* h264 = &parsed_payload->type.Video.codecHeader.H264;
+ parsed_payload->video_header().width = 0;
+ parsed_payload->video_header().height = 0;
+ parsed_payload->video_header().codec = kVideoCodecH264;
+ parsed_payload->video_header().simulcastIdx = 0;
+ parsed_payload->video_header().is_first_packet_in_frame = first_fragment;
+ RTPVideoHeaderH264* h264 = &parsed_payload->video_header().codecHeader.H264;
h264->packetization_type = kH264FuA;
h264->nalu_type = original_nal_type;
if (first_fragment) {
diff --git a/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc b/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
index 0eaf7a5..c5f2382 100644
--- a/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
@@ -600,11 +600,11 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264SingleNalu,
- payload.type.Video.codecHeader.H264.packetization_type);
- EXPECT_EQ(kIdr, payload.type.Video.codecHeader.H264.nalu_type);
+ payload.video_header().codecHeader.H264.packetization_type);
+ EXPECT_EQ(kIdr, payload.video_header().codecHeader.H264.nalu_type);
}
TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
@@ -616,12 +616,12 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264SingleNalu,
- payload.type.Video.codecHeader.H264.packetization_type);
- EXPECT_EQ(1280u, payload.type.Video.width);
- EXPECT_EQ(720u, payload.type.Video.height);
+ payload.video_header().codecHeader.H264.packetization_type);
+ EXPECT_EQ(1280u, payload.video_header().width);
+ EXPECT_EQ(720u, payload.video_header().height);
}
TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
@@ -646,9 +646,9 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
- const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+ const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
EXPECT_EQ(kH264StapA, h264.packetization_type);
// NALU type for aggregated packets is the type of the first packet only.
EXPECT_EQ(kSps, h264.nalu_type);
@@ -677,11 +677,12 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
- EXPECT_EQ(kH264StapA, payload.type.Video.codecHeader.H264.packetization_type);
- EXPECT_EQ(1280u, payload.type.Video.width);
- EXPECT_EQ(720u, payload.type.Video.height);
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+ EXPECT_EQ(kH264StapA,
+ payload.video_header().codecHeader.H264.packetization_type);
+ EXPECT_EQ(1280u, payload.video_header().width);
+ EXPECT_EQ(720u, payload.video_header().height);
}
TEST_F(RtpDepacketizerH264Test, TestEmptyStapARejected) {
@@ -804,11 +805,12 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
- EXPECT_EQ(kH264StapA, payload.type.Video.codecHeader.H264.packetization_type);
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+ EXPECT_EQ(kH264StapA,
+ payload.video_header().codecHeader.H264.packetization_type);
// NALU type for aggregated packets is the type of the first packet only.
- EXPECT_EQ(kSlice, payload.type.Video.codecHeader.H264.nalu_type);
+ EXPECT_EQ(kSlice, payload.video_header().codecHeader.H264.nalu_type);
}
TEST_F(RtpDepacketizerH264Test, TestFuA) {
@@ -843,9 +845,9 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet1, sizeof(packet1)));
ExpectPacket(&payload, kExpected1, sizeof(kExpected1));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
- const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+ const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
EXPECT_EQ(kH264FuA, h264.packetization_type);
EXPECT_EQ(kIdr, h264.nalu_type);
ASSERT_EQ(1u, h264.nalus_length);
@@ -859,10 +861,10 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_FALSE(payload.type.Video.is_first_packet_in_frame);
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
{
- const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+ const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
EXPECT_EQ(kH264FuA, h264.packetization_type);
EXPECT_EQ(kIdr, h264.nalu_type);
// NALU info is only expected for the first FU-A packet.
@@ -873,10 +875,10 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
- EXPECT_FALSE(payload.type.Video.is_first_packet_in_frame);
+ EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+ EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
{
- const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+ const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
EXPECT_EQ(kH264FuA, h264.packetization_type);
EXPECT_EQ(kIdr, h264.nalu_type);
// NALU info is only expected for the first FU-A packet.
@@ -928,7 +930,7 @@
};
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
- const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+ const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
EXPECT_EQ(kSei, h264.nalu_type);
diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic.cc b/modules/rtp_rtcp/source/rtp_format_video_generic.cc
index 8be5b4a..a2a3ad1 100644
--- a/modules/rtp_rtcp/source/rtp_format_video_generic.cc
+++ b/modules/rtp_rtcp/source/rtp_format_video_generic.cc
@@ -129,11 +129,11 @@
((generic_header & RtpFormatVideoGeneric::kKeyFrameBit) != 0)
? kVideoFrameKey
: kVideoFrameDelta;
- parsed_payload->type.Video.is_first_packet_in_frame =
+ parsed_payload->video_header().is_first_packet_in_frame =
(generic_header & RtpFormatVideoGeneric::kFirstPacketBit) != 0;
- parsed_payload->type.Video.codec = kVideoCodecGeneric;
- parsed_payload->type.Video.width = 0;
- parsed_payload->type.Video.height = 0;
+ parsed_payload->video_header().codec = kVideoCodecGeneric;
+ parsed_payload->video_header().width = 0;
+ parsed_payload->video_header().height = 0;
parsed_payload->payload = payload_data;
parsed_payload->payload_length = payload_data_length;
diff --git a/modules/rtp_rtcp/source/rtp_format_vp8.cc b/modules/rtp_rtcp/source/rtp_format_vp8.cc
index 0f559e8..38d6d68 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp8.cc
+++ b/modules/rtp_rtcp/source/rtp_format_vp8.cc
@@ -129,8 +129,8 @@
// in the beginning of the partition.
return -1;
}
- parsed_payload->type.Video.width = ((data[7] << 8) + data[6]) & 0x3FFF;
- parsed_payload->type.Video.height = ((data[9] << 8) + data[8]) & 0x3FFF;
+ parsed_payload->video_header().width = ((data[7] << 8) + data[6]) & 0x3FFF;
+ parsed_payload->video_header().height = ((data[9] << 8) + data[8]) & 0x3FFF;
return 0;
}
@@ -480,22 +480,22 @@
bool beginning_of_partition = (*payload_data & 0x10) ? true : false; // S bit
int partition_id = (*payload_data & 0x0F); // PartID field
- parsed_payload->type.Video.width = 0;
- parsed_payload->type.Video.height = 0;
- parsed_payload->type.Video.is_first_packet_in_frame =
+ parsed_payload->video_header().width = 0;
+ parsed_payload->video_header().height = 0;
+ parsed_payload->video_header().is_first_packet_in_frame =
beginning_of_partition && (partition_id == 0);
- parsed_payload->type.Video.simulcastIdx = 0;
- parsed_payload->type.Video.codec = kVideoCodecVP8;
- parsed_payload->type.Video.codecHeader.VP8.nonReference =
+ parsed_payload->video_header().simulcastIdx = 0;
+ parsed_payload->video_header().codec = kVideoCodecVP8;
+ parsed_payload->video_header().codecHeader.VP8.nonReference =
(*payload_data & 0x20) ? true : false; // N bit
- parsed_payload->type.Video.codecHeader.VP8.partitionId = partition_id;
- parsed_payload->type.Video.codecHeader.VP8.beginningOfPartition =
+ parsed_payload->video_header().codecHeader.VP8.partitionId = partition_id;
+ parsed_payload->video_header().codecHeader.VP8.beginningOfPartition =
beginning_of_partition;
- parsed_payload->type.Video.codecHeader.VP8.pictureId = kNoPictureId;
- parsed_payload->type.Video.codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
- parsed_payload->type.Video.codecHeader.VP8.temporalIdx = kNoTemporalIdx;
- parsed_payload->type.Video.codecHeader.VP8.layerSync = false;
- parsed_payload->type.Video.codecHeader.VP8.keyIdx = kNoKeyIdx;
+ parsed_payload->video_header().codecHeader.VP8.pictureId = kNoPictureId;
+ parsed_payload->video_header().codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
+ parsed_payload->video_header().codecHeader.VP8.temporalIdx = kNoTemporalIdx;
+ parsed_payload->video_header().codecHeader.VP8.layerSync = false;
+ parsed_payload->video_header().codecHeader.VP8.keyIdx = kNoKeyIdx;
if (partition_id > 8) {
// Weak check for corrupt payload_data: PartID MUST NOT be larger than 8.
@@ -512,7 +512,7 @@
if (extension) {
const int parsed_bytes =
- ParseVP8Extension(&parsed_payload->type.Video.codecHeader.VP8,
+ ParseVP8Extension(&parsed_payload->video_header().codecHeader.VP8,
payload_data, payload_data_length);
if (parsed_bytes < 0)
return false;
diff --git a/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc b/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
index cff14ed..4213c0b 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
@@ -59,23 +59,23 @@
// | padding |
// : :
// +-+-+-+-+-+-+-+-+
-void VerifyBasicHeader(RTPTypeHeader* type, bool N, bool S, int part_id) {
- ASSERT_TRUE(type != NULL);
- EXPECT_EQ(N, type->Video.codecHeader.VP8.nonReference);
- EXPECT_EQ(S, type->Video.codecHeader.VP8.beginningOfPartition);
- EXPECT_EQ(part_id, type->Video.codecHeader.VP8.partitionId);
+void VerifyBasicHeader(RTPVideoHeader* header, bool N, bool S, int part_id) {
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(N, header->codecHeader.VP8.nonReference);
+ EXPECT_EQ(S, header->codecHeader.VP8.beginningOfPartition);
+ EXPECT_EQ(part_id, header->codecHeader.VP8.partitionId);
}
-void VerifyExtensions(RTPTypeHeader* type,
+void VerifyExtensions(RTPVideoHeader* header,
int16_t picture_id, /* I */
int16_t tl0_pic_idx, /* L */
uint8_t temporal_idx, /* T */
int key_idx /* K */) {
- ASSERT_TRUE(type != NULL);
- EXPECT_EQ(picture_id, type->Video.codecHeader.VP8.pictureId);
- EXPECT_EQ(tl0_pic_idx, type->Video.codecHeader.VP8.tl0PicIdx);
- EXPECT_EQ(temporal_idx, type->Video.codecHeader.VP8.temporalIdx);
- EXPECT_EQ(key_idx, type->Video.codecHeader.VP8.keyIdx);
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(picture_id, header->codecHeader.VP8.pictureId);
+ EXPECT_EQ(tl0_pic_idx, header->codecHeader.VP8.tl0PicIdx);
+ EXPECT_EQ(temporal_idx, header->codecHeader.VP8.temporalIdx);
+ EXPECT_EQ(key_idx, header->codecHeader.VP8.keyIdx);
}
} // namespace
@@ -299,10 +299,10 @@
sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 0, 1, 4);
- VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx,
- kNoKeyIdx);
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 0, 1, 4);
+ VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
+ kNoTemporalIdx, kNoKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, PictureID) {
@@ -319,10 +319,10 @@
ExpectPacket(&payload, packet + kHeaderLength1,
sizeof(packet) - kHeaderLength1);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 1, 0, 0);
- VerifyExtensions(&payload.type, kPictureId, kNoTl0PicIdx, kNoTemporalIdx,
- kNoKeyIdx);
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 1, 0, 0);
+ VerifyExtensions(&payload.video_header(), kPictureId, kNoTl0PicIdx,
+ kNoTemporalIdx, kNoKeyIdx);
// Re-use packet, but change to long PictureID.
packet[2] = 0x80 | kPictureId;
@@ -332,9 +332,9 @@
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet + kHeaderLength2,
sizeof(packet) - kHeaderLength2);
- VerifyBasicHeader(&payload.type, 1, 0, 0);
- VerifyExtensions(&payload.type, (kPictureId << 8) + kPictureId, kNoTl0PicIdx,
- kNoTemporalIdx, kNoKeyIdx);
+ VerifyBasicHeader(&payload.video_header(), 1, 0, 0);
+ VerifyExtensions(&payload.video_header(), (kPictureId << 8) + kPictureId,
+ kNoTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, Tl0PicIdx) {
@@ -350,10 +350,10 @@
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 0, 1, 0);
- VerifyExtensions(&payload.type, kNoPictureId, kTl0PicIdx, kNoTemporalIdx,
- kNoKeyIdx);
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 0, 1, 0);
+ VerifyExtensions(&payload.video_header(), kNoPictureId, kTl0PicIdx,
+ kNoTemporalIdx, kNoKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
@@ -368,10 +368,11 @@
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 0, 0, 8);
- VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, 2, kNoKeyIdx);
- EXPECT_FALSE(payload.type.Video.codecHeader.VP8.layerSync);
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
+ VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
+ kNoKeyIdx);
+ EXPECT_FALSE(payload.video_header().codecHeader.VP8.layerSync);
}
TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
@@ -387,10 +388,10 @@
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 0, 0, 8);
- VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx,
- kKeyIdx);
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
+ VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
+ kNoTemporalIdx, kKeyIdx);
}
TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
@@ -408,9 +409,9 @@
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 0, 0, 8);
- VerifyExtensions(&payload.type, (17 << 8) + 17, 42, 1, 17);
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
+ VerifyExtensions(&payload.video_header(), (17 << 8) + 17, 42, 1, 17);
}
TEST_F(RtpDepacketizerVp8Test, TooShortHeader) {
@@ -447,12 +448,12 @@
auto vp8_payload = rtp_payload.subview(kHeaderLength);
ExpectPacket(&payload, vp8_payload.data(), vp8_payload.size());
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
- EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
- VerifyBasicHeader(&payload.type, 1, 1, 0);
- VerifyExtensions(&payload.type, input_header.pictureId,
+ EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+ VerifyBasicHeader(&payload.video_header(), 1, 1, 0);
+ VerifyExtensions(&payload.video_header(), input_header.pictureId,
input_header.tl0PicIdx, input_header.temporalIdx,
input_header.keyIdx);
- EXPECT_EQ(payload.type.Video.codecHeader.VP8.layerSync,
+ EXPECT_EQ(payload.video_header().codecHeader.VP8.layerSync,
input_header.layerSync);
}
diff --git a/modules/rtp_rtcp/source/rtp_format_vp9.cc b/modules/rtp_rtcp/source/rtp_format_vp9.cc
index cae7911..9d315a5 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp9.cc
+++ b/modules/rtp_rtcp/source/rtp_format_vp9.cc
@@ -712,14 +712,14 @@
RETURN_FALSE_ON_ERROR(parser.ReadBits(&z_bit, 1));
// Parsed payload.
- parsed_payload->type.Video.width = 0;
- parsed_payload->type.Video.height = 0;
- parsed_payload->type.Video.simulcastIdx = 0;
- parsed_payload->type.Video.codec = kVideoCodecVP9;
+ parsed_payload->video_header().width = 0;
+ parsed_payload->video_header().height = 0;
+ parsed_payload->video_header().simulcastIdx = 0;
+ parsed_payload->video_header().codec = kVideoCodecVP9;
parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;
- RTPVideoHeaderVP9* vp9 = &parsed_payload->type.Video.codecHeader.VP9;
+ RTPVideoHeaderVP9* vp9 = &parsed_payload->video_header().codecHeader.VP9;
vp9->InitRTPVideoHeaderVP9();
vp9->inter_pic_predicted = p_bit ? true : false;
vp9->flexible_mode = f_bit ? true : false;
@@ -748,11 +748,11 @@
}
if (vp9->spatial_layer_resolution_present) {
// TODO(asapersson): Add support for spatial layers.
- parsed_payload->type.Video.width = vp9->width[0];
- parsed_payload->type.Video.height = vp9->height[0];
+ parsed_payload->video_header().width = vp9->width[0];
+ parsed_payload->video_header().height = vp9->height[0];
}
}
- parsed_payload->type.Video.is_first_packet_in_frame =
+ parsed_payload->video_header().is_first_packet_in_frame =
b_bit && (!l_bit || !vp9->inter_layer_predicted);
uint64_t rem_bits = parser.RemainingBitCount();
diff --git a/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc b/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
index e798302..6f9bc3b 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
@@ -82,8 +82,8 @@
std::unique_ptr<RtpDepacketizer> depacketizer(new RtpDepacketizerVp9());
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
- EXPECT_EQ(kVideoCodecVP9, parsed.type.Video.codec);
- VerifyHeader(expected, parsed.type.Video.codecHeader.VP9);
+ EXPECT_EQ(kVideoCodecVP9, parsed.video_header().codec);
+ VerifyHeader(expected, parsed.video_header().codecHeader.VP9);
const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
}
@@ -760,7 +760,7 @@
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(kVideoFrameKey, parsed.frame_type);
- EXPECT_TRUE(parsed.type.Video.is_first_packet_in_frame);
+ EXPECT_TRUE(parsed.video_header().is_first_packet_in_frame);
}
TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
@@ -770,7 +770,7 @@
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(kVideoFrameDelta, parsed.frame_type);
- EXPECT_FALSE(parsed.type.Video.is_first_packet_in_frame);
+ EXPECT_FALSE(parsed.video_header().is_first_packet_in_frame);
}
TEST_F(RtpDepacketizerVp9Test, ParseResolution) {
@@ -790,8 +790,8 @@
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
- EXPECT_EQ(kWidth[0], parsed.type.Video.width);
- EXPECT_EQ(kHeight[0], parsed.type.Video.height);
+ EXPECT_EQ(kWidth[0], parsed.video_header().width);
+ EXPECT_EQ(kHeight[0], parsed.video_header().height);
}
TEST_F(RtpDepacketizerVp9Test, ParseFailsForNoPayloadLength) {
diff --git a/modules/rtp_rtcp/source/rtp_receiver_video.cc b/modules/rtp_rtcp/source/rtp_receiver_video.cc
index d93ee76..5e6bf3e 100644
--- a/modules/rtp_rtcp/source/rtp_receiver_video.cc
+++ b/modules/rtp_rtcp/source/rtp_receiver_video.cc
@@ -48,7 +48,7 @@
const uint8_t* payload,
size_t payload_length,
int64_t timestamp_ms) {
- rtp_header->type.Video.codec =
+ rtp_header->video_header().codec =
specific_payload.video_payload().videoCodecType;
RTC_DCHECK_GE(payload_length, rtp_header->header.paddingLength);
@@ -66,7 +66,7 @@
// We are not allowed to hold a critical section when calling below functions.
std::unique_ptr<RtpDepacketizer> depacketizer(
- RtpDepacketizer::Create(rtp_header->type.Video.codec));
+ RtpDepacketizer::Create(rtp_header->video_header().codec));
if (depacketizer.get() == NULL) {
RTC_LOG(LS_ERROR) << "Failed to create depacketizer.";
return -1;
@@ -77,28 +77,28 @@
return -1;
rtp_header->frameType = parsed_payload.frame_type;
- rtp_header->type = parsed_payload.type;
- rtp_header->type.Video.rotation = kVideoRotation_0;
- rtp_header->type.Video.content_type = VideoContentType::UNSPECIFIED;
- rtp_header->type.Video.video_timing.flags = VideoSendTiming::kInvalid;
+ rtp_header->video_header() = parsed_payload.video_header();
+ rtp_header->video_header().rotation = kVideoRotation_0;
+ rtp_header->video_header().content_type = VideoContentType::UNSPECIFIED;
+ rtp_header->video_header().video_timing.flags = VideoSendTiming::kInvalid;
// Retrieve the video rotation information.
if (rtp_header->header.extension.hasVideoRotation) {
- rtp_header->type.Video.rotation =
+ rtp_header->video_header().rotation =
rtp_header->header.extension.videoRotation;
}
if (rtp_header->header.extension.hasVideoContentType) {
- rtp_header->type.Video.content_type =
+ rtp_header->video_header().content_type =
rtp_header->header.extension.videoContentType;
}
if (rtp_header->header.extension.has_video_timing) {
- rtp_header->type.Video.video_timing =
+ rtp_header->video_header().video_timing =
rtp_header->header.extension.video_timing;
}
- rtp_header->type.Video.playout_delay =
+ rtp_header->video_header().playout_delay =
rtp_header->header.extension.playout_delay;
return data_callback_->OnReceivedPayloadData(parsed_payload.payload,
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc
index 71c17ea..45b2c2d 100644
--- a/modules/video_coding/jitter_buffer_unittest.cc
+++ b/modules/video_coding/jitter_buffer_unittest.cc
@@ -242,7 +242,7 @@
rtpHeader.header.timestamp = timestamp_;
rtpHeader.header.markerBit = true;
rtpHeader.frameType = kVideoFrameDelta;
- rtpHeader.type.Video.codec = kVideoCodecUnknown;
+ rtpHeader.video_header().codec = kVideoCodecUnknown;
packet_.reset(new VCMPacket(data_, size_, rtpHeader));
}
@@ -800,7 +800,7 @@
rtpHeader.header.sequenceNumber = seq_num_ + 2;
rtpHeader.header.timestamp = timestamp_ + (33 * 90);
rtpHeader.header.markerBit = false;
- rtpHeader.type.Video.codec = kVideoCodecUnknown;
+ rtpHeader.video_header().codec = kVideoCodecUnknown;
VCMPacket empty_packet(data_, 0, rtpHeader);
EXPECT_EQ(kOldPacket,
jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
@@ -2164,7 +2164,7 @@
timestamp_ += 33 * 90;
WebRtcRTPHeader rtpHeader;
memset(&rtpHeader, 0, sizeof(rtpHeader));
- rtpHeader.type.Video.codec = kVideoCodecUnknown;
+ rtpHeader.video_header().codec = kVideoCodecUnknown;
VCMPacket emptypacket(data_, 0, rtpHeader);
emptypacket.seqNum = seq_num_;
emptypacket.timestamp = timestamp_;
diff --git a/modules/video_coding/packet.cc b/modules/video_coding/packet.cc
index 7db566a..e4e1ca4 100644
--- a/modules/video_coding/packet.cc
+++ b/modules/video_coding/packet.cc
@@ -50,20 +50,21 @@
timesNacked(-1),
frameType(rtpHeader.frameType),
codec(kVideoCodecUnknown),
- is_first_packet_in_frame(rtpHeader.type.Video.is_first_packet_in_frame),
+ is_first_packet_in_frame(
+ rtpHeader.video_header().is_first_packet_in_frame),
completeNALU(kNaluComplete),
insertStartCode(false),
- width(rtpHeader.type.Video.width),
- height(rtpHeader.type.Video.height),
- video_header(rtpHeader.type.Video) {
- CopyCodecSpecifics(rtpHeader.type.Video);
+ width(rtpHeader.video_header().width),
+ height(rtpHeader.video_header().height),
+ video_header(rtpHeader.video_header()) {
+ CopyCodecSpecifics(rtpHeader.video_header());
if (markerBit) {
- video_header.rotation = rtpHeader.type.Video.rotation;
+ video_header.rotation = rtpHeader.video_header().rotation;
}
// Playout decisions are made entirely based on first packet in a frame.
if (is_first_packet_in_frame) {
- video_header.playout_delay = rtpHeader.type.Video.playout_delay;
+ video_header.playout_delay = rtpHeader.video_header().playout_delay;
} else {
video_header.playout_delay = {-1, -1};
}
diff --git a/modules/video_coding/video_receiver_unittest.cc b/modules/video_coding/video_receiver_unittest.cc
index 2855f7a..a95b150 100644
--- a/modules/video_coding/video_receiver_unittest.cc
+++ b/modules/video_coding/video_receiver_unittest.cc
@@ -106,7 +106,7 @@
header.header.payloadType = kUnusedPayloadType;
header.header.ssrc = 1;
header.header.headerLength = 12;
- header.type.Video.codec = kVideoCodecVP8;
+ header.video_header().codec = kVideoCodecVP8;
for (int i = 0; i < 10; ++i) {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
InsertAndVerifyPaddingFrame(payload, &header);
@@ -130,17 +130,17 @@
header.header.payloadType = kUnusedPayloadType;
header.header.ssrc = 1;
header.header.headerLength = 12;
- header.type.Video.codec = kVideoCodecVP8;
+ header.video_header().codec = kVideoCodecVP8;
// Insert one video frame to get one frame decoded.
header.frameType = kVideoFrameKey;
- header.type.Video.is_first_packet_in_frame = true;
+ header.video_header().is_first_packet_in_frame = true;
header.header.markerBit = true;
InsertAndVerifyDecodableFrame(payload, kFrameSize, &header);
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
header.frameType = kEmptyFrame;
- header.type.Video.is_first_packet_in_frame = false;
+ header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
// Insert padding frames.
for (int i = 0; i < 10; ++i) {
@@ -176,15 +176,15 @@
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
header.frameType = kEmptyFrame;
- header.type.Video.is_first_packet_in_frame = false;
+ header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
header.header.paddingLength = kPaddingSize;
header.header.payloadType = kUnusedPayloadType;
header.header.ssrc = 1;
header.header.headerLength = 12;
- header.type.Video.codec = kVideoCodecVP8;
- header.type.Video.codecHeader.VP8.pictureId = -1;
- header.type.Video.codecHeader.VP8.tl0PicIdx = -1;
+ header.video_header().codec = kVideoCodecVP8;
+ header.video_header().codecHeader.VP8.pictureId = -1;
+ header.video_header().codecHeader.VP8.tl0PicIdx = -1;
for (int i = 0; i < 3; ++i) {
// Insert 2 video frames.
for (int j = 0; j < 2; ++j) {
@@ -192,7 +192,7 @@
header.frameType = kVideoFrameKey;
else
header.frameType = kVideoFrameDelta;
- header.type.Video.is_first_packet_in_frame = true;
+ header.video_header().is_first_packet_in_frame = true;
header.header.markerBit = true;
InsertAndVerifyDecodableFrame(payload, kFrameSize, &header);
clock_.AdvanceTimeMilliseconds(33);
@@ -201,7 +201,7 @@
// Insert 2 padding only frames.
header.frameType = kEmptyFrame;
- header.type.Video.is_first_packet_in_frame = false;
+ header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
for (int j = 0; j < 2; ++j) {
// InsertAndVerifyPaddingFrame(payload, &header);
diff --git a/test/layer_filtering_transport.cc b/test/layer_filtering_transport.cc
index 723eda8..9f4333c 100644
--- a/test/layer_filtering_transport.cc
+++ b/test/layer_filtering_transport.cc
@@ -136,22 +136,23 @@
RtpDepacketizer::ParsedPayload parsed_payload;
if (depacketizer->Parse(&parsed_payload, payload, payload_data_length)) {
const int temporal_idx = static_cast<int>(
- is_vp8 ? parsed_payload.type.Video.codecHeader.VP8.temporalIdx
- : parsed_payload.type.Video.codecHeader.VP9.temporal_idx);
+ is_vp8 ? parsed_payload.video_header().codecHeader.VP8.temporalIdx
+ : parsed_payload.video_header().codecHeader.VP9.temporal_idx);
const int spatial_idx = static_cast<int>(
is_vp8 ? kNoSpatialIdx
- : parsed_payload.type.Video.codecHeader.VP9.spatial_idx);
+ : parsed_payload.video_header().codecHeader.VP9.spatial_idx);
const bool non_ref_for_inter_layer_pred =
is_vp8 ? false
- : parsed_payload.type.Video.codecHeader.VP9
- .non_ref_for_inter_layer_pred;
+ : parsed_payload.video_header()
+ .codecHeader.VP9.non_ref_for_inter_layer_pred;
// The number of spatial layers is sent in ssData, which is included only
// in the first packet of the first spatial layer of a key frame.
- if (!parsed_payload.type.Video.codecHeader.VP9.inter_pic_predicted &&
- parsed_payload.type.Video.codecHeader.VP9.beginning_of_frame == 1 &&
+ if (!parsed_payload.video_header().codecHeader.VP9.inter_pic_predicted &&
+ parsed_payload.video_header().codecHeader.VP9.beginning_of_frame ==
+ 1 &&
spatial_idx == 0) {
num_active_spatial_layers_ =
- parsed_payload.type.Video.codecHeader.VP9.num_spatial_layers;
+ parsed_payload.video_header().codecHeader.VP9.num_spatial_layers;
} else if (spatial_idx == kNoSpatialIdx)
num_active_spatial_layers_ = 1;
RTC_CHECK_GT(num_active_spatial_layers_, 0);
@@ -159,7 +160,7 @@
if (selected_sl_ >= 0 &&
spatial_idx ==
std::min(num_active_spatial_layers_ - 1, selected_sl_) &&
- parsed_payload.type.Video.codecHeader.VP9.end_of_frame) {
+ parsed_payload.video_header().codecHeader.VP9.end_of_frame) {
// This layer is now the last in the superframe.
set_marker_bit = true;
} else {
diff --git a/video/picture_id_tests.cc b/video/picture_id_tests.cc
index b30f324..073a560 100644
--- a/video/picture_id_tests.cc
+++ b/video/picture_id_tests.cc
@@ -98,19 +98,19 @@
switch (codec_type_) {
case kVideoCodecVP8:
parsed->picture_id =
- parsed_payload.type.Video.codecHeader.VP8.pictureId;
+ parsed_payload.video_header().codecHeader.VP8.pictureId;
parsed->tl0_pic_idx =
- parsed_payload.type.Video.codecHeader.VP8.tl0PicIdx;
+ parsed_payload.video_header().codecHeader.VP8.tl0PicIdx;
parsed->temporal_idx =
- parsed_payload.type.Video.codecHeader.VP8.temporalIdx;
+ parsed_payload.video_header().codecHeader.VP8.temporalIdx;
break;
case kVideoCodecVP9:
parsed->picture_id =
- parsed_payload.type.Video.codecHeader.VP9.picture_id;
+ parsed_payload.video_header().codecHeader.VP9.picture_id;
parsed->tl0_pic_idx =
- parsed_payload.type.Video.codecHeader.VP9.tl0_pic_idx;
+ parsed_payload.video_header().codecHeader.VP9.tl0_pic_idx;
parsed->temporal_idx =
- parsed_payload.type.Video.codecHeader.VP9.temporal_idx;
+ parsed_payload.video_header().codecHeader.VP9.temporal_idx;
break;
default:
RTC_NOTREACHED();
diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc
index 2e0d258..b6d049e 100644
--- a/video/rtp_video_stream_receiver_unittest.cc
+++ b/video/rtp_video_stream_receiver_unittest.cc
@@ -136,7 +136,7 @@
WebRtcRTPHeader GetDefaultPacket() {
WebRtcRTPHeader packet;
memset(&packet, 0, sizeof(packet));
- packet.type.Video.codec = kVideoCodecH264;
+ packet.video_header().codec = kVideoCodecH264;
return packet;
}
@@ -151,8 +151,9 @@
info.pps_id = -1;
data->push_back(H264::NaluType::kSps);
data->push_back(sps_id);
- packet->type.Video.codecHeader.H264
- .nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
+ packet->video_header()
+ .codecHeader.H264
+ .nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
}
void AddPps(WebRtcRTPHeader* packet,
@@ -165,8 +166,9 @@
info.pps_id = pps_id;
data->push_back(H264::NaluType::kPps);
data->push_back(pps_id);
- packet->type.Video.codecHeader.H264
- .nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
+ packet->video_header()
+ .codecHeader.H264
+ .nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
}
void AddIdr(WebRtcRTPHeader* packet, int pps_id) {
@@ -174,8 +176,9 @@
info.type = H264::NaluType::kIdr;
info.sps_id = -1;
info.pps_id = pps_id;
- packet->type.Video.codecHeader.H264
- .nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
+ packet->video_header()
+ .codecHeader.H264
+ .nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
}
protected:
@@ -204,9 +207,9 @@
memset(&rtp_header, 0, sizeof(rtp_header));
rtp_header.header.sequenceNumber = 1;
rtp_header.header.markerBit = 1;
- rtp_header.type.Video.is_first_packet_in_frame = true;
+ rtp_header.video_header().is_first_packet_in_frame = true;
rtp_header.frameType = kVideoFrameKey;
- rtp_header.type.Video.codec = kVideoCodecGeneric;
+ rtp_header.video_header().codec = kVideoCodecGeneric;
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
@@ -239,9 +242,9 @@
memset(&rtp_header, 0, sizeof(rtp_header));
rtp_header.header.sequenceNumber = 1;
rtp_header.header.markerBit = 1;
- rtp_header.type.Video.is_first_packet_in_frame = true;
+ rtp_header.video_header().is_first_packet_in_frame = true;
rtp_header.frameType = kVideoFrameKey;
- rtp_header.type.Video.codec = kVideoCodecGeneric;
+ rtp_header.video_header().codec = kVideoCodecGeneric;
constexpr uint8_t expected_bitsteam[] = {1, 2, 3, 0xff};
mock_on_complete_frame_callback_.AppendExpectedBitstream(
expected_bitsteam, sizeof(expected_bitsteam));
@@ -268,7 +271,7 @@
WebRtcRTPHeader sps_packet = GetDefaultPacket();
AddSps(&sps_packet, 0, &sps_data);
sps_packet.header.sequenceNumber = 0;
- sps_packet.type.Video.is_first_packet_in_frame = true;
+ sps_packet.video_header().is_first_packet_in_frame = true;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
@@ -280,7 +283,7 @@
WebRtcRTPHeader pps_packet = GetDefaultPacket();
AddPps(&pps_packet, 0, 1, &pps_data);
pps_packet.header.sequenceNumber = 1;
- pps_packet.type.Video.is_first_packet_in_frame = true;
+ pps_packet.video_header().is_first_packet_in_frame = true;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
@@ -291,7 +294,7 @@
std::vector<uint8_t> idr_data;
WebRtcRTPHeader idr_packet = GetDefaultPacket();
AddIdr(&idr_packet, 1);
- idr_packet.type.Video.is_first_packet_in_frame = true;
+ idr_packet.video_header().is_first_packet_in_frame = true;
idr_packet.header.sequenceNumber = 2;
idr_packet.header.markerBit = 1;
idr_packet.frameType = kVideoFrameKey;
@@ -331,12 +334,12 @@
WebRtcRTPHeader idr_packet = GetDefaultPacket();
AddIdr(&idr_packet, 0);
idr_packet.header.payloadType = kPayloadType;
- idr_packet.type.Video.is_first_packet_in_frame = true;
+ idr_packet.video_header().is_first_packet_in_frame = true;
idr_packet.header.sequenceNumber = 2;
idr_packet.header.markerBit = 1;
- idr_packet.type.Video.is_first_packet_in_frame = true;
+ idr_packet.video_header().is_first_packet_in_frame = true;
idr_packet.frameType = kVideoFrameKey;
- idr_packet.type.Video.codec = kVideoCodecH264;
+ idr_packet.video_header().codec = kVideoCodecH264;
data.insert(data.end(), {1, 2, 3});
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
@@ -352,11 +355,11 @@
std::vector<uint8_t> data;
data.insert(data.end(), {1, 2, 3});
header.header.payloadType = 99;
- header.type.Video.is_first_packet_in_frame = true;
+ header.video_header().is_first_packet_in_frame = true;
header.header.sequenceNumber = 2;
header.header.markerBit = true;
header.frameType = kVideoFrameKey;
- header.type.Video.codec = kVideoCodecGeneric;
+ header.video_header().codec = kVideoCodecGeneric;
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
@@ -388,9 +391,9 @@
memset(&rtp_header, 0, sizeof(rtp_header));
rtp_header.header.sequenceNumber = 1;
rtp_header.header.markerBit = 1;
- rtp_header.type.Video.is_first_packet_in_frame = true;
+ rtp_header.video_header().is_first_packet_in_frame = true;
rtp_header.frameType = kVideoFrameDelta;
- rtp_header.type.Video.codec = kVideoCodecGeneric;
+ rtp_header.video_header().codec = kVideoCodecGeneric;
EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
diff --git a/video/video_quality_test.cc b/video/video_quality_test.cc
index e735c38..ba0e23b 100644
--- a/video/video_quality_test.cc
+++ b/video/video_quality_test.cc
@@ -561,11 +561,11 @@
depacketizer->Parse(&parsed_payload, payload, payload_data_length);
RTC_DCHECK(result);
const int temporal_idx = static_cast<int>(
- is_vp8 ? parsed_payload.type.Video.codecHeader.VP8.temporalIdx
- : parsed_payload.type.Video.codecHeader.VP9.temporal_idx);
+ is_vp8 ? parsed_payload.video_header().codecHeader.VP8.temporalIdx
+ : parsed_payload.video_header().codecHeader.VP9.temporal_idx);
const int spatial_idx = static_cast<int>(
is_vp8 ? kNoSpatialIdx
- : parsed_payload.type.Video.codecHeader.VP9.spatial_idx);
+ : parsed_payload.video_header().codecHeader.VP9.spatial_idx);
return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
temporal_idx <= selected_tl_) &&
(selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index 2bb1cbd..c6122a7 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -3170,19 +3170,19 @@
RtpDepacketizer::ParsedPayload parsed;
RtpDepacketizerVp9 depacketizer;
EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
- EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.type.Video.codec);
+ EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.video_header().codec);
// Verify common fields for all configurations.
- VerifyCommonHeader(parsed.type.Video.codecHeader.VP9);
- CompareConsecutiveFrames(header, parsed.type.Video);
+ VerifyCommonHeader(parsed.video_header().codecHeader.VP9);
+ CompareConsecutiveFrames(header, parsed.video_header());
// Verify configuration specific settings.
- InspectHeader(parsed.type.Video.codecHeader.VP9);
+ InspectHeader(parsed.video_header().codecHeader.VP9);
++packets_sent_;
if (header.markerBit) {
++frames_sent_;
}
last_header_ = header;
- last_vp9_ = parsed.type.Video.codecHeader.VP9;
+ last_vp9_ = parsed.video_header().codecHeader.VP9;
}
return SEND_PACKET;
}