Unify FrameType and VideoFrameType.

Avoids some heap allocation and frame-type conversion that the mismatched
interfaces used to require. Having a single type is also less confusing.

BUG=webrtc:5042
R=magjed@webrtc.org, mflodman@webrtc.org, henrik.lundin@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/1371043003
Cr-Commit-Position: refs/heads/master@{#10320}

Parent: 4306fc70d7
Commit: 22993e1a0c
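To make the description concrete, here is a minimal, self-contained sketch (simplified names, not the actual WebRTC interfaces) of the call-path change: before this commit, code holding a `std::vector<FrameType>` had to allocate a temporary `std::vector<VideoFrameType>` and convert it element by element before every `VideoEncoder::Encode()` call; with one type, the caller's vector is validated and passed straight through, mirroring the `VCMGenericEncoder` hunks later in this diff.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for the unified enum from common_types.h.
enum FrameType {
  kEmptyFrame = 0,
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
};

// Hypothetical encoder entry point with the post-commit shape of
// webrtc::VideoEncoder::Encode() (codec arguments elided).
int32_t Encode(const std::vector<FrameType>* frame_types) {
  return 0;  // pretend success
}

int32_t EncodeFrame(const std::vector<FrameType>& frame_types) {
  // Pre-commit shape (pseudocode): a per-call heap allocation plus a copy.
  //   std::vector<VideoFrameType> converted(frame_types.size(), kDeltaFrame);
  //   VCMEncodedFrame::ConvertFrameTypes(frame_types, &converted);
  //   return Encode(&converted);
  // Post-commit shape: validate, then forward the caller's vector as-is.
  for (FrameType type : frame_types)
    assert(type == kVideoFrameKey || type == kVideoFrameDelta);
  return Encode(&frame_types);
}
```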
@@ -86,10 +86,9 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
   int32_t InitEncode(const webrtc::VideoCodec* codec_settings,
                      int32_t /* number_of_cores */,
                      size_t /* max_payload_size */) override;
-  int32_t Encode(
-      const webrtc::VideoFrame& input_image,
-      const webrtc::CodecSpecificInfo* /* codec_specific_info */,
-      const std::vector<webrtc::VideoFrameType>* frame_types) override;
+  int32_t Encode(const webrtc::VideoFrame& input_image,
+                 const webrtc::CodecSpecificInfo* /* codec_specific_info */,
+                 const std::vector<webrtc::FrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       webrtc::EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -121,7 +120,7 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
   int32_t InitEncodeOnCodecThread(int width, int height, int kbps, int fps);
   int32_t EncodeOnCodecThread(
       const webrtc::VideoFrame& input_image,
-      const std::vector<webrtc::VideoFrameType>* frame_types);
+      const std::vector<webrtc::FrameType>* frame_types);
   int32_t RegisterEncodeCompleteCallbackOnCodecThread(
       webrtc::EncodedImageCallback* callback);
   int32_t ReleaseOnCodecThread();
@@ -338,7 +337,7 @@ int32_t MediaCodecVideoEncoder::InitEncode(
 int32_t MediaCodecVideoEncoder::Encode(
     const webrtc::VideoFrame& frame,
     const webrtc::CodecSpecificInfo* /* codec_specific_info */,
-    const std::vector<webrtc::VideoFrameType>* frame_types) {
+    const std::vector<webrtc::FrameType>* frame_types) {
   return codec_thread_->Invoke<int32_t>(Bind(
       &MediaCodecVideoEncoder::EncodeOnCodecThread, this, frame, frame_types));
 }
@@ -501,7 +500,7 @@ int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(

 int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
     const webrtc::VideoFrame& frame,
-    const std::vector<webrtc::VideoFrameType>* frame_types) {
+    const std::vector<webrtc::FrameType>* frame_types) {
   CheckOnCodecThread();
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
@@ -155,10 +155,9 @@ class FakeWebRtcVideoEncoder : public webrtc::VideoEncoder {
     return codec_settings_;
   }

-  virtual int32_t Encode(
-      const webrtc::VideoFrame& inputImage,
-      const webrtc::CodecSpecificInfo* codecSpecificInfo,
-      const std::vector<webrtc::VideoFrameType>* frame_types) {
+  virtual int32_t Encode(const webrtc::VideoFrame& inputImage,
+                         const webrtc::CodecSpecificInfo* codecSpecificInfo,
+                         const std::vector<webrtc::FrameType>* frame_types) {
     rtc::CritScope lock(&crit_);
     ++num_frames_encoded_;
     return WEBRTC_VIDEO_CODEC_OK;
@@ -156,15 +156,22 @@ enum ProcessingTypes
     kRecordingPreprocessing
 };

-enum FrameType
-{
-    kFrameEmpty = 0,
-    kAudioFrameSpeech = 1,
-    kAudioFrameCN = 2,
-    kVideoFrameKey = 3,    // independent frame
-    kVideoFrameDelta = 4,  // depends on the previous frame
+enum FrameType {
+  kEmptyFrame = 0,
+  kAudioFrameSpeech = 1,
+  kAudioFrameCN = 2,
+  kVideoFrameKey = 3,
+  kVideoFrameDelta = 4,
+  // TODO(pbos): Remove below aliases (non-kVideo prefixed) as soon as no
+  // VideoEncoder implementation in Chromium uses them.
+  kKeyFrame = kVideoFrameKey,
+  kDeltaFrame = kVideoFrameDelta,
 };

+// TODO(pbos): Remove VideoFrameType when VideoEncoder implementations no longer
+// depend on it.
+using VideoFrameType = FrameType;
+
 // Statistics for an RTCP channel
 struct RtcpStatistics {
   RtcpStatistics()
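A standalone sketch of the unified enum exactly as added above, with compile-time checks showing why existing callers keep compiling. One caveat, visible later in this diff: the removed video-only enum had `kKeyFrame = 0` and `kDeltaFrame = 1`, while the aliases now resolve to 3 and 4, so the names are preserved but the numeric values are not.

```cpp
#include <type_traits>

enum FrameType {
  kEmptyFrame = 0,
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
  // Transitional aliases kept for external VideoEncoder implementations.
  kKeyFrame = kVideoFrameKey,
  kDeltaFrame = kVideoFrameDelta,
};

// Transitional alias: old signatures that spell VideoFrameType still match.
using VideoFrameType = FrameType;

static_assert(std::is_same<VideoFrameType, FrameType>::value,
              "one type serves both interfaces");
static_assert(kKeyFrame == kVideoFrameKey && kDeltaFrame == kVideoFrameDelta,
              "aliases resolve to the kVideo-prefixed values (3 and 4), not "
              "to the 0 and 1 of the removed VideoFrameType enum");
```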
@@ -21,7 +21,7 @@ class VideoFrame;

 struct EncodedFrame {
  public:
-  EncodedFrame() : data_(NULL), length_(0), frame_type_(kFrameEmpty) {}
+  EncodedFrame() : data_(NULL), length_(0), frame_type_(kEmptyFrame) {}
   EncodedFrame(const uint8_t* data, size_t length, FrameType frame_type)
       : data_(data), length_(length), frame_type_(frame_type) {}

@@ -46,7 +46,7 @@ class AcmReceiverTest : public AudioPacketizationCallback,
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kFrameEmpty) {
+        last_frame_type_(kEmptyFrame) {
     AudioCoding::Config config;
     config.transport = this;
     acm_.reset(new AudioCodingImpl(config));
@@ -121,7 +121,7 @@ class AcmReceiverTest : public AudioPacketizationCallback,
                    const uint8_t* payload_data,
                    size_t payload_len_bytes,
                    const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;

     rtp_header_.header.payloadType = payload_type;
@@ -46,7 +46,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kFrameEmpty) {
+        last_frame_type_(kEmptyFrame) {
     AudioCodingModule::Config config;
     acm_.reset(new AudioCodingModuleImpl(config));
     receiver_.reset(new AcmReceiver(config));
@@ -120,7 +120,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
                    const uint8_t* payload_data,
                    size_t payload_len_bytes,
                    const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;

     rtp_header_.header.payloadType = payload_type;
@@ -171,7 +171,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   FrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
-    frame_type = kFrameEmpty;
+    frame_type = kEmptyFrame;
     encoded_info.payload_type = previous_pltype;
   } else {
     RTC_DCHECK_GT(encode_buffer_.size(), 0u);
@@ -92,7 +92,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
  public:
   PacketizationCallbackStubOldApi()
       : num_calls_(0),
-        last_frame_type_(kFrameEmpty),
+        last_frame_type_(kEmptyFrame),
         last_payload_type_(-1),
         last_timestamp_(0),
         crit_sect_(CriticalSectionWrapper::CreateCriticalSection()) {}
@@ -416,18 +416,18 @@ class AudioCodingModuleTestWithComfortNoiseOldApi
     int ix;
     FrameType type;
   } expectation[] = {{2, kAudioFrameCN},
-                     {5, kFrameEmpty},
-                     {8, kFrameEmpty},
+                     {5, kEmptyFrame},
+                     {8, kEmptyFrame},
                      {11, kAudioFrameCN},
-                     {14, kFrameEmpty},
-                     {17, kFrameEmpty},
+                     {14, kEmptyFrame},
+                     {17, kEmptyFrame},
                      {20, kAudioFrameCN},
-                     {23, kFrameEmpty},
-                     {26, kFrameEmpty},
-                     {29, kFrameEmpty},
+                     {23, kEmptyFrame},
+                     {26, kEmptyFrame},
+                     {29, kEmptyFrame},
                      {32, kAudioFrameCN},
-                     {35, kFrameEmpty},
-                     {38, kFrameEmpty}};
+                     {35, kEmptyFrame},
+                     {38, kEmptyFrame}};
     for (int i = 0; i < kLoops; ++i) {
       int num_calls_before = packet_cb_.num_calls();
       EXPECT_EQ(i / blocks_per_packet, num_calls_before);
@@ -447,7 +447,7 @@ class AudioCodingModuleTestWithComfortNoiseOldApi

 // Checks that the transport callback is invoked once per frame period of the
 // underlying speech encoder, even when comfort noise is produced.
-// Also checks that the frame type is kAudioFrameCN or kFrameEmpty.
+// Also checks that the frame type is kAudioFrameCN or kEmptyFrame.
 // This test and the next check the same thing, but differ in the order of
 // speech codec and CNG registration.
 TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
@@ -42,7 +42,7 @@ int32_t Channel::SendData(FrameType frameType,
   } else {
     rtpInfo.type.Audio.isCNG = false;
   }
-  if (frameType == kFrameEmpty) {
+  if (frameType == kEmptyFrame) {
     // When frame is empty, we should not transmit it. The frame size of the
     // next non-empty frame will be based on the previous frame size.
     _useLastFrameSize = _lastFrameSizeSample > 0;
@@ -74,7 +74,7 @@ int32_t TestPack::SendData(FrameType frame_type, uint8_t payload_type,
   } else {
     rtp_info.type.Audio.isCNG = false;
   }
-  if (frame_type == kFrameEmpty) {
+  if (frame_type == kEmptyFrame) {
     // Skip this frame.
     return 0;
   }
@@ -58,7 +58,7 @@ int32_t TestPackStereo::SendData(const FrameType frame_type,
   rtp_info.header.sequenceNumber = seq_no_++;
   rtp_info.header.payloadType = payload_type;
   rtp_info.header.timestamp = timestamp;
-  if (frame_type == kFrameEmpty) {
+  if (frame_type == kEmptyFrame) {
     // Skip this frame
     return 0;
   }
@@ -44,7 +44,7 @@ int32_t ActivityMonitor::InFrameType(FrameType frame_type) {

 void ActivityMonitor::PrintStatistics() {
   printf("\n");
-  printf("kFrameEmpty %u\n", counter_[kFrameEmpty]);
+  printf("kEmptyFrame %u\n", counter_[kEmptyFrame]);
   printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
   printf("kAudioFrameCN %u\n", counter_[kAudioFrameCN]);
   printf("kVideoFrameKey %u\n", counter_[kVideoFrameKey]);
@@ -248,7 +248,7 @@ void TestOpusDtx::Perform() {
       32000, 1, out_filename, false, expects);

   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
-  expects[kFrameEmpty] = 1;
+  expects[kEmptyFrame] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
       32000, 1, out_filename, true, expects);
@@ -256,13 +256,13 @@ void TestOpusDtx::Perform() {
   out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
   RegisterCodec(kOpusStereo);
   EXPECT_EQ(0, acm_send_->DisableOpusDtx());
-  expects[kFrameEmpty] = 0;
+  expects[kEmptyFrame] = 0;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, false, expects);

   EXPECT_EQ(0, acm_send_->EnableOpusDtx());

-  expects[kFrameEmpty] = 1;
+  expects[kEmptyFrame] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, true, expects);
 #endif
@@ -29,7 +29,7 @@ class ActivityMonitor : public ACMVADCallback {
   void ResetStatistics();
   void GetStatistics(uint32_t* stats);
  private:
-  // 0 - kFrameEmpty
+  // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
   // 3 - kVideoFrameKey (not used by audio)
@@ -60,7 +60,7 @@ class TestVadDtx : public ACMTest {
   // 0 : there have been no packets of type |x|,
   // 1 : there have been packets of type |x|,
   // with |x| indicates the following packet types
-  // 0 - kFrameEmpty
+  // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
   // 3 - kVideoFrameKey (not used by audio)
@@ -288,7 +288,7 @@ VADCallback::VADCallback() {
 }

 void VADCallback::PrintFrameTypes() {
-  printf("kFrameEmpty......... %d\n", _numFrameTypes[kFrameEmpty]);
+  printf("kEmptyFrame......... %d\n", _numFrameTypes[kEmptyFrame]);
   printf("kAudioFrameSpeech... %d\n", _numFrameTypes[kAudioFrameSpeech]);
   printf("kAudioFrameCN....... %d\n", _numFrameTypes[kAudioFrameCN]);
   printf("kVideoFrameKey...... %d\n", _numFrameTypes[kVideoFrameKey]);
@@ -83,7 +83,7 @@ void TestFua(size_t frame_size,
   fragmentation.fragmentationOffset[0] = 0;
   fragmentation.fragmentationLength[0] = frame_size;
   rtc::scoped_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
-      kRtpVideoH264, max_payload_size, NULL, kFrameEmpty));
+      kRtpVideoH264, max_payload_size, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame.get(), frame_size, &fragmentation);

   rtc::scoped_ptr<uint8_t[]> packet(new uint8_t[max_payload_size]);
@@ -157,7 +157,7 @@ TEST(RtpPacketizerH264Test, TestSingleNalu) {
   fragmentation.fragmentationOffset[0] = 0;
   fragmentation.fragmentationLength[0] = sizeof(frame);
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, sizeof(frame), &fragmentation);
   uint8_t packet[kMaxPayloadSize] = {0};
   size_t length = 0;
@@ -186,7 +186,7 @@ TEST(RtpPacketizerH264Test, TestSingleNaluTwoPackets) {
   frame[fragmentation.fragmentationOffset[1]] = 0x01;

   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

   uint8_t packet[kMaxPayloadSize] = {0};
@@ -223,7 +223,7 @@ TEST(RtpPacketizerH264Test, TestStapA) {
   fragmentation.fragmentationLength[2] =
       kNalHeaderSize + kFrameSize - kPayloadOffset;
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

   uint8_t packet[kMaxPayloadSize] = {0};
@@ -258,7 +258,7 @@ TEST(RtpPacketizerH264Test, TestTooSmallForStapAHeaders) {
   fragmentation.fragmentationLength[2] =
       kNalHeaderSize + kFrameSize - kPayloadOffset;
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

   uint8_t packet[kMaxPayloadSize] = {0};
@@ -306,7 +306,7 @@ TEST(RtpPacketizerH264Test, TestMixedStapA_FUA) {
     }
   }
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

   // First expecting two FU-A packets.
@@ -35,7 +35,8 @@ const size_t kRtpHeaderLength = 12;

 const char* FrameTypeToString(FrameType frame_type) {
   switch (frame_type) {
-    case kFrameEmpty: return "empty";
+    case kEmptyFrame:
+      return "empty";
     case kAudioFrameSpeech: return "audio_speech";
     case kAudioFrameCN: return "audio_cn";
     case kVideoFrameKey: return "video_key";
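For context, a self-contained version of the mapping this hunk touches. The diff shows only four cases, so the `kVideoFrameDelta` arm below is an assumption based on the naming pattern of the visible ones.

```cpp
enum FrameType { kEmptyFrame = 0, kAudioFrameSpeech = 1, kAudioFrameCN = 2,
                 kVideoFrameKey = 3, kVideoFrameDelta = 4 };

// Note: because kKeyFrame/kDeltaFrame are mere aliases of the kVideo-prefixed
// values, a switch over FrameType cannot list them as separate cases --
// duplicate case values would not compile.
const char* FrameTypeToString(FrameType frame_type) {
  switch (frame_type) {
    case kEmptyFrame:
      return "empty";
    case kAudioFrameSpeech:
      return "audio_speech";
    case kAudioFrameCN:
      return "audio_cn";
    case kVideoFrameKey:
      return "video_key";
    case kVideoFrameDelta:  // assumed; this arm is outside the visible hunk
      return "video_delta";
  }
  return "unknown";
}
```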
@@ -509,7 +510,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
     TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
                             "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
-           frame_type == kFrameEmpty);
+           frame_type == kEmptyFrame);

     ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
                                 payload_data, payload_size, fragmentation);
@@ -518,7 +519,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
                             "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);

-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;

     ret_val =
@@ -208,8 +208,8 @@ int32_t RTPSenderAudio::SendAudio(
   // A source MAY send events and coded audio packets for the same time
   // but we don't support it
   if (_dtmfEventIsOn) {
-    if (frameType == kFrameEmpty) {
-      // kFrameEmpty is used to drive the DTMF when in CN mode
+    if (frameType == kEmptyFrame) {
+      // kEmptyFrame is used to drive the DTMF when in CN mode
       // it can be triggered more frequently than we want to send the
       // DTMF packets.
       if (packet_size_samples > (captureTimeStamp - _dtmfTimestampLastSent)) {
@@ -259,7 +259,7 @@ int32_t RTPSenderAudio::SendAudio(
     return 0;
   }
   if (payloadSize == 0 || payloadData == NULL) {
-    if (frameType == kFrameEmpty) {
+    if (frameType == kEmptyFrame) {
       // we don't send empty audio RTP packets
       // no error since we use it to drive DTMF when we use VAD
       return 0;
@@ -1266,7 +1266,7 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
 // audio channel.
 // This test checks the marker bit for the first packet and the consequent
 // packets of the same telephone event. Since it is specifically for DTMF
-// events, ignoring audio packets and sending kFrameEmpty instead of those.
+// events, ignoring audio packets and sending kEmptyFrame instead of those.
 TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
   char payload_name[RTP_PAYLOAD_NAME_SIZE] = "telephone-event";
   uint8_t payload_type = 126;
@@ -1284,13 +1284,13 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
   // During start, it takes the starting timestamp as last sent timestamp.
   // The duration is calculated as the difference of current and last sent
   // timestamp. So for first call it will skip since the duration is zero.
-  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
                                              capture_time_ms, 0, nullptr, 0,
                                              nullptr));
   // DTMF Sample Length is (Frequency/1000) * Duration.
   // So in this case, it is (8000/1000) * 500 = 4000.
   // Sending it as two packets.
-  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
                                              capture_time_ms + 2000, 0, nullptr,
                                              0, nullptr));
   rtc::scoped_ptr<webrtc::RtpHeaderParser> rtp_parser(
|
||||
// Marker Bit should be set to 1 for first packet.
|
||||
EXPECT_TRUE(rtp_header.markerBit);
|
||||
|
||||
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
|
||||
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
|
||||
capture_time_ms + 4000, 0, nullptr,
|
||||
0, nullptr));
|
||||
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
|
||||
|
||||
@@ -242,7 +242,7 @@ int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
 int H264VideoToolboxEncoder::Encode(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   if (input_image.IsZeroSize()) {
     // It's possible to get zero sizes as a signal to produce keyframes (this
     // happens for internal sources). But this shouldn't happen in
@@ -38,7 +38,7 @@ class H264VideoToolboxEncoder : public H264Encoder {

   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<VideoFrameType>* frame_types) override;
+             const std::vector<FrameType>* frame_types) override;

   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;

@@ -50,7 +50,7 @@ class I420Encoder : public VideoEncoder {
   // <0 - Error
   int Encode(const VideoFrame& inputImage,
              const CodecSpecificInfo* /*codecSpecificInfo*/,
-             const std::vector<VideoFrameType>* /*frame_types*/) override;
+             const std::vector<FrameType>* /*frame_types*/) override;

   // Register an encode complete callback object.
   //
@@ -74,7 +74,7 @@ int I420Encoder::InitEncode(const VideoCodec* codecSettings,

 int I420Encoder::Encode(const VideoFrame& inputImage,
                         const CodecSpecificInfo* /*codecSpecificInfo*/,
-                        const std::vector<VideoFrameType>* /*frame_types*/) {
+                        const std::vector<FrameType>* /*frame_types*/) {
   if (!_inited) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -35,7 +35,7 @@ class MockVideoEncoder : public VideoEncoder {
   MOCK_METHOD3(Encode,
                int32_t(const VideoFrame& inputImage,
                        const CodecSpecificInfo* codecSpecificInfo,
-                       const std::vector<VideoFrameType>* frame_types));
+                       const std::vector<FrameType>* frame_types));
   MOCK_METHOD1(RegisterEncodeCompleteCallback,
                int32_t(EncodedImageCallback* callback));
   MOCK_METHOD0(Release, int32_t());
@@ -39,7 +39,7 @@ struct FrameStatistic {

   // Copied from EncodedImage
   size_t encoded_frame_length_in_bytes;
-  webrtc::VideoFrameType frame_type;
+  webrtc::FrameType frame_type;
 };

 // Handles statistics from a single video processing run.
@@ -162,7 +162,7 @@ size_t VideoProcessorImpl::EncodedFrameSize() {
   return encoded_frame_size_;
 }

-VideoFrameType VideoProcessorImpl::EncodedFrameType() {
+FrameType VideoProcessorImpl::EncodedFrameType() {
   return encoded_frame_type_;
 }

@@ -199,7 +199,7 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
   source_frame_.set_timestamp(frame_number);

   // Decide if we're going to force a keyframe:
-  std::vector<VideoFrameType> frame_types(1, kDeltaFrame);
+  std::vector<FrameType> frame_types(1, kDeltaFrame);
   if (config_.keyframe_interval > 0 &&
       frame_number % config_.keyframe_interval == 0) {
     frame_types[0] = kKeyFrame;
@@ -147,7 +147,7 @@ class VideoProcessor {
   virtual size_t EncodedFrameSize() = 0;

   // Return the encoded frame type (key or delta).
-  virtual VideoFrameType EncodedFrameType() = 0;
+  virtual FrameType EncodedFrameType() = 0;

   // Return the number of dropped frames.
   virtual int NumberDroppedFrames() = 0;
@@ -183,7 +183,7 @@ class VideoProcessorImpl : public VideoProcessor {
   // Return the size of the encoded frame in bytes.
   size_t EncodedFrameSize() override;
   // Return the encoded frame type (key or delta).
-  VideoFrameType EncodedFrameType() override;
+  FrameType EncodedFrameType() override;
   // Return the number of dropped frames.
   int NumberDroppedFrames() override;
   // Return the number of spatial resizes.
@@ -212,7 +212,7 @@ class VideoProcessorImpl : public VideoProcessor {
   // If Init() has executed successfully.
   bool initialized_;
   size_t encoded_frame_size_;
-  VideoFrameType encoded_frame_type_;
+  FrameType encoded_frame_type_;
   int prev_time_stamp_;
   int num_dropped_frames_;
   int num_spatial_resizes_;
@@ -268,7 +268,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
   }

   // For every encoded frame, update the rate control metrics.
-  void UpdateRateControlMetrics(int frame_num, VideoFrameType frame_type) {
+  void UpdateRateControlMetrics(int frame_num, FrameType frame_type) {
     float encoded_size_kbits = processor_->EncodedFrameSize() * 8.0f / 1000.0f;
     // Update layer data.
     // Update rate mismatch relative to per-frame bandwidth for delta frames.
@@ -450,7 +450,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
     ResetRateControlMetrics(
         rate_profile.frame_index_rate_update[update_index + 1]);
     int frame_number = 0;
-    VideoFrameType frame_type = kDeltaFrame;
+    FrameType frame_type = kDeltaFrame;
     while (processor_->ProcessFrame(frame_number) &&
            frame_number < num_frames) {
       // Get the layer index for the frame |frame_number|.
@@ -233,7 +233,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
 int SimulcastEncoderAdapter::Encode(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   if (!Initialized()) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -267,7 +267,7 @@ int SimulcastEncoderAdapter::Encode(
     if (!streaminfos_[stream_idx].send_stream)
       continue;

-    std::vector<VideoFrameType> stream_frame_types;
+    std::vector<FrameType> stream_frame_types;
     if (send_key_frame) {
       stream_frame_types.push_back(kKeyFrame);
       streaminfos_[stream_idx].key_frame_request = false;
@@ -42,7 +42,7 @@ class SimulcastEncoderAdapter : public VP8Encoder {
                  size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<VideoFrameType>* frame_types) override;
+             const std::vector<FrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
   int SetRates(uint32_t new_bitrate_kbit, uint32_t new_framerate) override;
@@ -117,7 +117,7 @@ class MockVideoEncoder : public VideoEncoder {

   int32_t Encode(const VideoFrame& inputImage,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     return 0;
   }

@@ -338,7 +338,7 @@ class TestVp8Simulcast : public ::testing::Test {
     decoder_->Release();
   }

-  void ExpectStreams(VideoFrameType frame_type, int expected_video_streams) {
+  void ExpectStreams(FrameType frame_type, int expected_video_streams) {
     ASSERT_GE(expected_video_streams, 0);
     ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
     if (expected_video_streams >= 1) {
@@ -389,8 +389,7 @@ class TestVp8Simulcast : public ::testing::Test {
   // a key frame was only requested for some of them.
   void TestKeyFrameRequestsOnAllStreams() {
     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -424,8 +423,7 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestPaddingAllStreams() {
     // We should always encode the base layer.
     encoder_->SetRates(kMinBitrates[0] - 1, 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -437,8 +435,7 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestPaddingTwoStreams() {
     // We have just enough to get only the first stream and padding for two.
     encoder_->SetRates(kMinBitrates[0], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -451,8 +448,7 @@ class TestVp8Simulcast : public ::testing::Test {
     // We are just below limit of sending second stream, so we should get
     // the first stream maxed out (at |maxBitrate|), and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -464,8 +460,7 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestPaddingOneStream() {
     // We have just enough to send two streams, so padding for one stream.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -479,8 +474,7 @@ class TestVp8Simulcast : public ::testing::Test {
     // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] - 1, 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -493,8 +487,7 @@ class TestVp8Simulcast : public ::testing::Test {
     // We have just enough to send all streams.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -507,8 +500,7 @@ class TestVp8Simulcast : public ::testing::Test {
     // We should get three media streams.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
                        kMaxBitrates[2], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -589,8 +581,7 @@ class TestVp8Simulcast : public ::testing::Test {

     // Encode one frame and verify.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     EXPECT_CALL(encoder_callback_, Encoded(
         AllOf(Field(&EncodedImage::_frameType, kKeyFrame),
               Field(&EncodedImage::_encodedWidth, width),
@@ -706,7 +706,7 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {

 int VP8EncoderImpl::Encode(const VideoFrame& frame,
                            const CodecSpecificInfo* codec_specific_info,
-                           const std::vector<VideoFrameType>* frame_types) {
+                           const std::vector<FrameType>* frame_types) {
   TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());

   if (!inited_)
@@ -48,7 +48,7 @@ class VP8EncoderImpl : public VP8Encoder {

   virtual int Encode(const VideoFrame& input_image,
                      const CodecSpecificInfo* codec_specific_info,
-                     const std::vector<VideoFrameType>* frame_types);
+                     const std::vector<FrameType>* frame_types);

   virtual int RegisterEncodeCompleteCallback(EncodedImageCallback* callback);

@@ -422,7 +422,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {

 int VP9EncoderImpl::Encode(const VideoFrame& input_image,
                            const CodecSpecificInfo* codec_specific_info,
-                           const std::vector<VideoFrameType>* frame_types) {
+                           const std::vector<FrameType>* frame_types) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -432,7 +432,7 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   if (encoded_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  VideoFrameType frame_type = kDeltaFrame;
+  FrameType frame_type = kDeltaFrame;
   // We only support one stream at the moment.
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
@@ -35,7 +35,7 @@ class VP9EncoderImpl : public VP9Encoder {

   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<VideoFrameType>* frame_types) override;
+             const std::vector<FrameType>* frame_types) override;

   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;

@@ -181,7 +181,7 @@ TEST(TestDecodingState, UpdateOldPacket) {
   // Now insert empty packet belonging to the same frame.
   packet.timestamp = 1;
   packet.seqNum = 2;
-  packet.frameType = kFrameEmpty;
+  packet.frameType = kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 2);
@@ -196,7 +196,7 @@ TEST(TestDecodingState, UpdateOldPacket) {
   // sequence number.
   packet.timestamp = 0;
   packet.seqNum = 4;
-  packet.frameType = kFrameEmpty;
+  packet.frameType = kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 3);
@@ -226,38 +226,4 @@ void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize)
     }
 }

-webrtc::FrameType VCMEncodedFrame::ConvertFrameType(VideoFrameType frameType)
-{
-    switch(frameType) {
-    case kKeyFrame:
-        return kVideoFrameKey;
-    case kDeltaFrame:
-        return kVideoFrameDelta;
-    }
-    // Bogus default return value.
-    return kVideoFrameDelta;
-}
-
-VideoFrameType VCMEncodedFrame::ConvertFrameType(webrtc::FrameType frame_type) {
-  switch (frame_type) {
-    case kVideoFrameKey:
-      return kKeyFrame;
-    case kVideoFrameDelta:
-      return kDeltaFrame;
-    default:
-      assert(false);
-      return kDeltaFrame;
-  }
-}
-
-void VCMEncodedFrame::ConvertFrameTypes(
-    const std::vector<webrtc::FrameType>& frame_types,
-    std::vector<VideoFrameType>* video_frame_types) {
-  assert(video_frame_types);
-  video_frame_types->reserve(frame_types.size());
-  for (size_t i = 0; i < frame_types.size(); ++i) {
-    (*video_frame_types)[i] = ConvertFrameType(frame_types[i]);
-  }
-}
-
-}
+}  // namespace webrtc
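The helpers removed above existed only to bridge the two enums, and the mapping was partial: the two-value video enum could not represent audio or empty frames, which is why the removed code needed an `assert(false)` arm and a "bogus default return value". With one type, the bridge collapses to a validity check at the boundary, as in this sketch (simplified; it mirrors the `RTC_DCHECK` added to `VCMGenericEncoder::Encode()` later in this diff).

```cpp
#include <cassert>
#include <vector>

enum FrameType { kEmptyFrame = 0, kVideoFrameKey = 3, kVideoFrameDelta = 4 };

// Instead of converting FrameType -> VideoFrameType (and asserting on
// unrepresentable values inside the conversion), the video path now simply
// checks that it was handed video frame types.
void ValidateVideoFrameTypes(const std::vector<FrameType>& frame_types) {
  for (FrameType type : frame_types)
    assert(type == kVideoFrameKey || type == kVideoFrameDelta);
}
```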
@@ -68,7 +68,7 @@ public:
     /**
     *   Get frame type
     */
-    webrtc::FrameType FrameType() const {return ConvertFrameType(_frameType);}
+    webrtc::FrameType FrameType() const { return _frameType; }
     /**
     *   Get frame rotation
     */
@@ -95,12 +95,6 @@ public:

     const RTPFragmentationHeader* FragmentationHeader() const;

-    static webrtc::FrameType ConvertFrameType(VideoFrameType frameType);
-    static VideoFrameType ConvertFrameType(webrtc::FrameType frameType);
-    static void ConvertFrameTypes(
-        const std::vector<webrtc::FrameType>& frame_types,
-        std::vector<VideoFrameType>* video_frame_types);
-
 protected:
     /**
     *   Verifies that current allocated buffer size is larger than or equal to the input size.
@@ -98,7 +98,7 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
     // We only take the ntp timestamp of the first packet of a frame.
     ntp_time_ms_ = packet.ntp_time_ms_;
     _codec = packet.codec;
-    if (packet.frameType != kFrameEmpty) {
+    if (packet.frameType != kEmptyFrame) {
       // first media packet
       SetState(kStateIncomplete);
     }
@@ -280,7 +280,7 @@ VCMFrameBuffer::PrepareForDecode(bool continuous) {
 #endif
   // Transfer frame information to EncodedFrame and create any codec
   // specific information.
-  _frameType = ConvertFrameType(_sessionInfo.FrameType());
+  _frameType = _sessionInfo.FrameType();
   _completeFrame = _sessionInfo.complete();
   _missingFrame = !continuous;
 }
@@ -140,9 +140,8 @@ VCMGenericEncoder::InitEncode(const VideoCodec* settings,
 int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
                                   const CodecSpecificInfo* codecSpecificInfo,
                                   const std::vector<FrameType>& frameTypes) {
-  std::vector<VideoFrameType> video_frame_types(frameTypes.size(),
-                                                kDeltaFrame);
-  VCMEncodedFrame::ConvertFrameTypes(frameTypes, &video_frame_types);
+  for (FrameType frame_type : frameTypes)
+    RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);

   rotation_ = inputFrame.rotation();

@@ -153,12 +152,11 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
     vcm_encoded_frame_callback_->SetRotation(rotation_);
   }

-  int32_t result =
-      encoder_->Encode(inputFrame, codecSpecificInfo, &video_frame_types);
+  int32_t result = encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
   if (is_screenshare_ &&
       result == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT) {
     // Target bitrate exceeded, encoder state has been reset - try again.
-    return encoder_->Encode(inputFrame, codecSpecificInfo, &video_frame_types);
+    return encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
   }

   return result;
@@ -223,10 +221,7 @@ VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
 int32_t VCMGenericEncoder::RequestFrame(
     const std::vector<FrameType>& frame_types) {
   VideoFrame image;
-  std::vector<VideoFrameType> video_frame_types(frame_types.size(),
-                                                kDeltaFrame);
-  VCMEncodedFrame::ConvertFrameTypes(frame_types, &video_frame_types);
-  return encoder_->Encode(image, NULL, &video_frame_types);
+  return encoder_->Encode(image, NULL, &frame_types);
 }

 int32_t
@@ -294,6 +289,8 @@ int32_t VCMEncodedFrameCallback::Encoded(
     const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentationHeader) {
+  RTC_DCHECK(encodedImage._frameType == kVideoFrameKey ||
+             encodedImage._frameType == kVideoFrameDelta);
   post_encode_callback_->Encoded(encodedImage, NULL, NULL);

   if (_sendCallback == NULL) {
@@ -643,7 +643,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,

   // Empty packets may bias the jitter estimate (lacking size component),
   // therefore don't let empty packet trigger the following updates:
-  if (packet.frameType != kFrameEmpty) {
+  if (packet.frameType != kEmptyFrame) {
     if (waiting_for_completion_.timestamp == packet.timestamp) {
       // This can get bad if we have a lot of duplicate packets,
       // we will then count some packet multiple times.
@@ -168,10 +168,9 @@ class TestRunningJitterBuffer : public ::testing::Test {
   }

   VCMFrameBufferEnum InsertFrame(FrameType frame_type) {
-    stream_generator_->GenerateFrame(frame_type,
-                                     (frame_type != kFrameEmpty) ? 1 : 0,
-                                     (frame_type == kFrameEmpty) ? 1 : 0,
-                                     clock_->TimeInMilliseconds());
+    stream_generator_->GenerateFrame(
+        frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
+        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
     VCMFrameBufferEnum ret = InsertPacketAndPop(0);
     clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
     return ret;
@@ -1050,7 +1049,7 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->completeNALU = kNaluEnd;
-  packet_->frameType = kFrameEmpty;
+  packet_->frameType = kEmptyFrame;

   EXPECT_EQ(jitter_buffer_->InsertPacket(*packet_, &retransmitted),
             kDecodableSession);
@@ -1524,7 +1523,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
-  packet_->frameType = kFrameEmpty;
+  packet_->frameType = kEmptyFrame;

   EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
                                                    &retransmitted));
@@ -1895,7 +1894,7 @@ TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
 TEST_F(TestJitterBufferNack, EmptyPackets) {
   // Make sure empty packets doesn't clog the jitter buffer.
   jitter_buffer_->SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
-  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kFrameEmpty), kNoError);
+  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kEmptyFrame), kNoError);
   InsertFrame(kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
 }
@@ -16,23 +16,21 @@
 namespace webrtc {

 VCMPacket::VCMPacket()
-    :
-    payloadType(0),
-    timestamp(0),
-    ntp_time_ms_(0),
-    seqNum(0),
-    dataPtr(NULL),
-    sizeBytes(0),
-    markerBit(false),
-    frameType(kFrameEmpty),
-    codec(kVideoCodecUnknown),
-    isFirstPacket(false),
-    completeNALU(kNaluUnset),
-    insertStartCode(false),
-    width(0),
-    height(0),
-    codecSpecificHeader() {
-}
+    : payloadType(0),
+      timestamp(0),
+      ntp_time_ms_(0),
+      seqNum(0),
+      dataPtr(NULL),
+      sizeBytes(0),
+      markerBit(false),
+      frameType(kEmptyFrame),
+      codec(kVideoCodecUnknown),
+      isFirstPacket(false),
+      completeNALU(kNaluUnset),
+      insertStartCode(false),
+      width(0),
+      height(0),
+      codecSpecificHeader() {}

 VCMPacket::VCMPacket(const uint8_t* ptr,
                      const size_t size,
@@ -88,7 +86,7 @@ void VCMPacket::Reset() {
   dataPtr = NULL;
   sizeBytes = 0;
   markerBit = false;
-  frameType = kFrameEmpty;
+  frameType = kEmptyFrame;
   codec = kVideoCodecUnknown;
   isFirstPacket = false;
   completeNALU = kNaluUnset;
@@ -63,10 +63,8 @@ class TestVCMReceiver : public ::testing::Test {
   int32_t InsertFrame(FrameType frame_type, bool complete) {
     int num_of_packets = complete ? 1 : 2;
     stream_generator_->GenerateFrame(
-        frame_type,
-        (frame_type != kFrameEmpty) ? num_of_packets : 0,
-        (frame_type == kFrameEmpty) ? 1 : 0,
-        clock_->TimeInMilliseconds());
+        frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
+        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
     int32_t ret = InsertPacketAndPop(0);
     if (!complete) {
       // Drop the second packet.
@@ -464,7 +464,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
                                  uint8_t* frame_buffer,
                                  VCMDecodeErrorMode decode_error_mode,
                                  const FrameData& frame_data) {
-  if (packet.frameType == kFrameEmpty) {
+  if (packet.frameType == kEmptyFrame) {
     // Update sequence number of an empty packet.
     // Only media packets are inserted into the packet list.
     InformOfEmptyPacket(packet.seqNum);
@@ -516,7 +516,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
     LOG(LS_WARNING) << "Received packet with a sequence number which is out "
                        "of frame boundaries";
     return -3;
-  } else if (frame_type_ == kFrameEmpty && packet.frameType != kFrameEmpty) {
+  } else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
     // Update the frame type with the type of the first media packet.
     // TODO(mikhal): Can this trigger?
     frame_type_ = packet.frameType;
@@ -175,7 +175,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
   packet_.markerBit = true;
   packet_.seqNum = 2;
   packet_.sizeBytes = 0;
-  packet_.frameType = kFrameEmpty;
+  packet_.frameType = kEmptyFrame;
   EXPECT_EQ(0,
             session_.InsertPacket(packet_,
                                   frame_buffer_,
@@ -888,7 +888,7 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
 TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
   packet_.isFirstPacket = false;
   packet_.completeNALU = kNaluComplete;
-  packet_.frameType = kFrameEmpty;
+  packet_.frameType = kEmptyFrame;
   packet_.sizeBytes = 0;
   packet_.seqNum = 0;
   packet_.markerBit = false;
@@ -46,8 +46,8 @@ void StreamGenerator::GenerateFrame(FrameType type,
     ++sequence_number_;
   }
   for (int i = 0; i < num_empty_packets; ++i) {
-    packets_.push_back(GeneratePacket(
-        sequence_number_, timestamp, 0, false, false, kFrameEmpty));
+    packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
+                                      false, kEmptyFrame));
     ++sequence_number_;
   }
 }
@@ -88,7 +88,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
   const uint8_t payload[kPaddingSize] = {0};
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
   header.header.payloadType = kUnusedPayloadType;
@@ -112,7 +112,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
   const uint8_t payload[kFrameSize] = {0};
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
   header.header.payloadType = kUnusedPayloadType;
@@ -127,7 +127,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
   clock_.AdvanceTimeMilliseconds(33);
   header.header.timestamp += 3000;

-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.type.Video.isFirstPacket = false;
   header.header.markerBit = false;
   // Insert padding frames.
@@ -163,7 +163,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
   const uint8_t payload[kFrameSize] = {0};
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.type.Video.isFirstPacket = false;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
@@ -188,7 +188,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
   }

   // Insert 2 padding only frames.
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.type.Video.isFirstPacket = false;
   header.header.markerBit = false;
   for (int j = 0; j < 2; ++j) {
@@ -313,7 +313,7 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
   }
   // TODO(holmer): Add support for dropping frames per stream. Currently we
   // only have one frame dropper for all streams.
-  if (_nextFrameTypes[0] == kFrameEmpty) {
+  if (_nextFrameTypes[0] == kEmptyFrame) {
     return VCM_OK;
   }
   if (_mediaOpt.DropFrame()) {
@@ -244,7 +244,7 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
   }
   assert(stream >= 0);
   assert(stream < kNumberOfStreams);
-  std::vector<VideoFrameType> frame_types(kNumberOfStreams, kDeltaFrame);
+  std::vector<FrameType> frame_types(kNumberOfStreams, kDeltaFrame);
   frame_types[stream] = kKeyFrame;
   EXPECT_CALL(
       encoder_,
@@ -41,7 +41,7 @@ int32_t ConfigurableFrameSizeEncoder::InitEncode(
 int32_t ConfigurableFrameSizeEncoder::Encode(
     const VideoFrame& inputImage,
     const CodecSpecificInfo* codecSpecificInfo,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   EncodedImage encodedImage(
       buffer_.get(), current_frame_size_, max_frame_size_);
   encodedImage._completeFrame = true;
@@ -30,7 +30,7 @@ class ConfigurableFrameSizeEncoder : public VideoEncoder {

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;

   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
@@ -47,7 +47,7 @@ int32_t FakeEncoder::InitEncode(const VideoCodec* config,

 int32_t FakeEncoder::Encode(const VideoFrame& input_image,
                             const CodecSpecificInfo* codec_specific_info,
-                            const std::vector<VideoFrameType>* frame_types) {
+                            const std::vector<FrameType>* frame_types) {
   assert(config_.maxFramerate > 0);
   int64_t time_since_last_encode_ms = 1000 / config_.maxFramerate;
   int64_t time_now_ms = clock_->TimeInMilliseconds();
@@ -189,7 +189,7 @@ DelayedEncoder::DelayedEncoder(Clock* clock, int delay_ms)

 int32_t DelayedEncoder::Encode(const VideoFrame& input_image,
                                const CodecSpecificInfo* codec_specific_info,
-                               const std::vector<VideoFrameType>* frame_types) {
+                               const std::vector<FrameType>* frame_types) {
   SleepMs(delay_ms_);
   return FakeEncoder::Encode(input_image, codec_specific_info, frame_types);
 }
@@ -33,7 +33,7 @@ class FakeEncoder : public VideoEncoder {
                      size_t max_payload_size) override;
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -74,7 +74,7 @@ class DelayedEncoder : public test::FakeEncoder {

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;

  private:
   const int delay_ms_;
@@ -27,11 +27,8 @@ int32_t EncodedFrameCallbackAdapter::Encoded(
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentation) {
   RTC_DCHECK(observer_ != nullptr);
-  FrameType frame_type =
-      VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);
-  const EncodedFrame frame(encodedImage._buffer,
-                           encodedImage._length,
-                           frame_type);
+  const EncodedFrame frame(encodedImage._buffer, encodedImage._length,
+                           encodedImage._frameType);
   observer_->EncodedFrameCallback(frame);
   return 0;
 }
@@ -1589,7 +1589,7 @@ TEST_F(EndToEndTest, ObserversEncodedFrames) {
  public:
   EncodedFrameTestObserver()
       : length_(0),
-        frame_type_(kFrameEmpty),
+        frame_type_(kEmptyFrame),
         called_(EventWrapper::Create()) {}
   virtual ~EncodedFrameTestObserver() {}
@@ -2957,7 +2957,7 @@ TEST_F(EndToEndTest, RespectsNetworkState) {

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     {
       rtc::CritScope lock(&test_crit_);
       if (sender_state_ == kNetworkDown) {
|
||||
UnusedEncoder() : FakeEncoder(Clock::GetRealTimeClock()) {}
|
||||
int32_t Encode(const VideoFrame& input_image,
|
||||
const CodecSpecificInfo* codec_specific_info,
|
||||
const std::vector<VideoFrameType>* frame_types) override {
|
||||
const std::vector<FrameType>* frame_types) override {
|
||||
ADD_FAILURE() << "Unexpected frame encode.";
|
||||
return test::FakeEncoder::Encode(
|
||||
input_image, codec_specific_info, frame_types);
|
||||
|
||||
@@ -99,7 +99,7 @@ int32_t VideoEncoderSoftwareFallbackWrapper::Release() {
 int32_t VideoEncoderSoftwareFallbackWrapper::Encode(
     const VideoFrame& frame,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   if (fallback_encoder_)
     return fallback_encoder_->Encode(frame, codec_specific_info, frame_types);
   return encoder_->Encode(frame, codec_specific_info, frame_types);
@@ -32,7 +32,7 @@ class VideoEncoderSoftwareFallbackWrapperTest : public ::testing::Test {
   }
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     ++encode_count_;
     return WEBRTC_VIDEO_CODEC_OK;
   }
@@ -120,7 +120,7 @@ void VideoEncoderSoftwareFallbackWrapperTest::UtilizeFallbackEncoder() {
   memset(frame_.buffer(webrtc::kVPlane), 128,
          frame_.allocated_size(webrtc::kVPlane));

-  std::vector<VideoFrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kKeyFrame);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
   EXPECT_EQ(0, fake_encoder_.encode_count_);
@@ -163,7 +163,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
   EXPECT_EQ(&callback2, fake_encoder_.encode_complete_callback_);

   // Encoding a frame using the fallback should arrive at the new callback.
-  std::vector<VideoFrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kKeyFrame);
   frame_.set_timestamp(frame_.timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
@@ -1252,7 +1252,7 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {

   int32_t Encode(const VideoFrame& inputImage,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     EXPECT_TRUE(IsReadyForEncode());

     observation_complete_->Set();
@@ -1451,7 +1451,7 @@ class VideoCodecConfigObserver : public test::SendTest,

   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     // Silently skip the encode, FakeEncoder::Encode doesn't produce VP8.
     return 0;
   }
@@ -1742,7 +1742,7 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
  private:
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     CodecSpecificInfo specifics;
     memset(&specifics, 0, sizeof(specifics));
     specifics.codecType = kVideoCodecGeneric;
@@ -98,7 +98,7 @@ class VideoEncoder {
   // WEBRTC_VIDEO_CODEC_TIMEOUT
   virtual int32_t Encode(const VideoFrame& frame,
                          const CodecSpecificInfo* codec_specific_info,
-                         const std::vector<VideoFrameType>* frame_types) = 0;
+                         const std::vector<FrameType>* frame_types) = 0;

   // Inform the encoder of the new packet loss rate and the round-trip time of
   // the network.
@@ -147,7 +147,7 @@ class VideoEncoderSoftwareFallbackWrapper : public VideoEncoder {
   int32_t Release() override;
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;
   int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;

   int32_t SetRates(uint32_t bitrate, uint32_t framerate) override;
@@ -251,10 +251,10 @@ class ViEChannel : public VCMFrameTypeCallback,
                                 int min_playout_delay_ms,
                                 int render_delay_ms);

-  // Implements VideoFrameTypeCallback.
+  // Implements FrameTypeCallback.
   virtual int32_t RequestKeyFrame();

-  // Implements VideoFrameTypeCallback.
+  // Implements FrameTypeCallback.
   virtual int32_t SliceLossIndicationRequest(
       const uint64_t picture_id);

@@ -558,10 +558,11 @@ int32_t ViEEncoder::SendData(
   stats_proxy_->OnSendEncodedImage(encoded_image, rtp_video_hdr);

   return send_payload_router_->RoutePayload(
-      VCMEncodedFrame::ConvertFrameType(encoded_image._frameType), payload_type,
-      encoded_image._timeStamp, encoded_image.capture_time_ms_,
-      encoded_image._buffer, encoded_image._length, &fragmentation_header,
-      rtp_video_hdr) ? 0 : -1;
+             encoded_image._frameType, payload_type, encoded_image._timeStamp,
+             encoded_image.capture_time_ms_, encoded_image._buffer,
+             encoded_image._length, &fragmentation_header, rtp_video_hdr)
+             ? 0
+             : -1;
 }

 int32_t ViEEncoder::SendStatistics(const uint32_t bit_rate,
@@ -12,6 +12,7 @@
 #define WEBRTC_VIDEO_FRAME_H_

 #include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/common_types.h"
 #include "webrtc/common_video/interface/video_frame_buffer.h"
 #include "webrtc/common_video/rotation.h"
 #include "webrtc/typedefs.h"
@@ -166,11 +167,6 @@ class VideoFrame {
   VideoRotation rotation_;
 };

-enum VideoFrameType {
-  kKeyFrame = 0,
-  kDeltaFrame = 1,
-};
-
 // TODO(pbos): Rename EncodedFrame and reformat this class' members.
 class EncodedImage {
  public:
@@ -192,8 +188,7 @@ class EncodedImage {
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
-  // TODO(pbos): Use webrtc::FrameType directly (and remove VideoFrameType).
-  VideoFrameType _frameType = kDeltaFrame;
+  FrameType _frameType = kDeltaFrame;
   uint8_t* _buffer;
   size_t _length;
   size_t _size;
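Finally, the consumer-facing effect: `EncodedImage` now carries the shared `FrameType` directly (the separate `VideoFrameType` enum above is deleted), so downstream code such as the `ViEEncoder::SendData()` hunk forwards `encoded_image._frameType` without a `ConvertFrameType()` round trip. A minimal sketch with an abbreviated class shape and a hypothetical `RoutePayload` stand-in for the real payload router:

```cpp
#include <cstddef>
#include <cstdint>

enum FrameType { kEmptyFrame = 0, kVideoFrameKey = 3, kVideoFrameDelta = 4,
                 kDeltaFrame = kVideoFrameDelta };

// Abbreviated shape of webrtc::EncodedImage after this commit.
class EncodedImage {
 public:
  FrameType _frameType = kDeltaFrame;  // previously a VideoFrameType
  uint8_t* _buffer = nullptr;
  size_t _length = 0;
};

// Hypothetical payload router standing in for send_payload_router_.
bool RoutePayload(FrameType frame_type, const uint8_t* data, size_t length) {
  return frame_type != kEmptyFrame && data != nullptr && length > 0;
}

int32_t SendData(const EncodedImage& image) {
  // No ConvertFrameType() round trip: the frame type passes straight through.
  return RoutePayload(image._frameType, image._buffer, image._length) ? 0 : -1;
}
```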