/*
 *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "net/dcsctp/socket/stream_reset_handler.h"

#include <array>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <vector>

#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/task_queue/task_queue_base.h"
#include "net/dcsctp/common/handover_testing.h"
#include "net/dcsctp/common/internal_types.h"
#include "net/dcsctp/packet/chunk/reconfig_chunk.h"
#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h"
#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h"
#include "net/dcsctp/packet/parameter/parameter.h"
#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h"
#include "net/dcsctp/public/dcsctp_message.h"
#include "net/dcsctp/rx/data_tracker.h"
#include "net/dcsctp/rx/reassembly_queue.h"
#include "net/dcsctp/socket/mock_context.h"
#include "net/dcsctp/socket/mock_dcsctp_socket_callbacks.h"
#include "net/dcsctp/testing/data_generator.h"
#include "net/dcsctp/testing/testing_macros.h"
#include "net/dcsctp/timer/timer.h"
#include "net/dcsctp/tx/mock_send_queue.h"
#include "net/dcsctp/tx/retransmission_queue.h"
#include "rtc_base/gunit.h"
#include "test/gmock.h"

namespace dcsctp {
namespace {
using ::testing::IsEmpty;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ResponseResult = ReconfigurationResponseParameter::Result;

constexpr TSN kMyInitialTsn = MockContext::MyInitialTsn();
constexpr ReconfigRequestSN kMyInitialReqSn = ReconfigRequestSN(*kMyInitialTsn);
constexpr TSN kPeerInitialTsn = MockContext::PeerInitialTsn();
constexpr ReconfigRequestSN kPeerInitialReqSn =
    ReconfigRequestSN(*kPeerInitialTsn);
constexpr uint32_t kArwnd = 131072;
constexpr DurationMs kRto = DurationMs(250);

constexpr std::array<uint8_t, 4> kShortPayload = {1, 2, 3, 4};

MATCHER_P3(SctpMessageIs, stream_id, ppid, expected_payload, "") {
  if (arg.stream_id() != stream_id) {
    *result_listener << "the stream_id is " << *arg.stream_id();
    return false;
  }

  if (arg.ppid() != ppid) {
    *result_listener << "the ppid is " << *arg.ppid();
    return false;
  }

  if (std::vector<uint8_t>(arg.payload().begin(), arg.payload().end()) !=
      std::vector<uint8_t>(expected_payload.begin(), expected_payload.end())) {
    *result_listener << "the payload is wrong";
    return false;
  }
  return true;
}

TSN AddTo(TSN tsn, int delta) {
  return TSN(*tsn + delta);
}

ReconfigRequestSN AddTo(ReconfigRequestSN req_sn, int delta) {
  return ReconfigRequestSN(*req_sn + delta);
}

class StreamResetHandlerTest : public testing::Test {
 protected:
  StreamResetHandlerTest()
      : ctx_(&callbacks_),
        timer_manager_([this](webrtc::TaskQueueBase::DelayPrecision precision) {
          return callbacks_.CreateTimeout(precision);
        }),
        delayed_ack_timer_(timer_manager_.CreateTimer(
            "test/delayed_ack",
            []() { return absl::nullopt; },
            TimerOptions(DurationMs(0)))),
        t3_rtx_timer_(timer_manager_.CreateTimer(
            "test/t3_rtx",
            []() { return absl::nullopt; },
            TimerOptions(DurationMs(0)))),
        data_tracker_(std::make_unique<DataTracker>("log: ",
                                                    delayed_ack_timer_.get(),
                                                    kPeerInitialTsn)),
        reasm_(std::make_unique<ReassemblyQueue>("log: ",
                                                 kPeerInitialTsn,
                                                 kArwnd)),
        retransmission_queue_(std::make_unique<RetransmissionQueue>(
            "",
            &callbacks_,
            kMyInitialTsn,
            kArwnd,
            producer_,
            [](DurationMs rtt_ms) {},
            []() {},
            *t3_rtx_timer_,
            DcSctpOptions())),
        handler_(
            std::make_unique<StreamResetHandler>("log: ",
                                                 &ctx_,
                                                 &timer_manager_,
                                                 data_tracker_.get(),
                                                 reasm_.get(),
                                                 retransmission_queue_.get())) {
    EXPECT_CALL(ctx_, current_rto).WillRepeatedly(Return(kRto));
  }

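  // Advances the fake clock by `duration` and handles any timeouts that
  // expire as a result. Timeouts are expired and handled one at a time, so
  // that timers and timeouts stay in a consistent state even when several of
  // them expire at the same time (see webrtc:12614).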
  void AdvanceTime(DurationMs duration) {
    callbacks_.AdvanceTime(duration);

    for (;;) {
      absl::optional<TimeoutID> timeout_id = callbacks_.GetNextExpiredTimeout();
      if (!timeout_id.has_value()) {
        break;
      }
      timer_manager_.HandleTimeout(*timeout_id);
    }
  }

  // Handles the passed in RE-CONFIG `chunk` and returns the responses
  // that are sent in the response RE-CONFIG.
  std::vector<ReconfigurationResponseParameter> HandleAndCatchResponse(
      ReConfigChunk chunk) {
    handler_->HandleReConfig(std::move(chunk));

    std::vector<uint8_t> payload = callbacks_.ConsumeSentPacket();
    if (payload.empty()) {
      EXPECT_TRUE(false);
      return {};
    }

    std::vector<ReconfigurationResponseParameter> responses;
    absl::optional<SctpPacket> p = SctpPacket::Parse(payload);
    if (!p.has_value()) {
      EXPECT_TRUE(false);
      return {};
    }
    if (p->descriptors().size() != 1) {
      EXPECT_TRUE(false);
      return {};
    }
    absl::optional<ReConfigChunk> response_chunk =
        ReConfigChunk::Parse(p->descriptors()[0].data);
    if (!response_chunk.has_value()) {
      EXPECT_TRUE(false);
      return {};
    }
    for (const auto& desc : response_chunk->parameters().descriptors()) {
      if (desc.type == ReconfigurationResponseParameter::kType) {
        absl::optional<ReconfigurationResponseParameter> response =
            ReconfigurationResponseParameter::Parse(desc.data);
        if (!response.has_value()) {
          EXPECT_TRUE(false);
          return {};
        }
        responses.emplace_back(*std::move(response));
      }
    }
    return responses;
  }

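  // Simulates a socket handover: verifies that all components are ready for
  // handover, captures their state into a DcSctpSocketHandoverState, and then
  // recreates the data tracker, reassembly queue, retransmission queue and
  // stream reset handler from that state.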
  void PerformHandover() {
    EXPECT_TRUE(handler_->GetHandoverReadiness().IsReady());
    EXPECT_TRUE(data_tracker_->GetHandoverReadiness().IsReady());
    EXPECT_TRUE(reasm_->GetHandoverReadiness().IsReady());
    EXPECT_TRUE(retransmission_queue_->GetHandoverReadiness().IsReady());

    DcSctpSocketHandoverState state;
    handler_->AddHandoverState(state);
    data_tracker_->AddHandoverState(state);
    reasm_->AddHandoverState(state);
    retransmission_queue_->AddHandoverState(state);

    g_handover_state_transformer_for_test(&state);

    data_tracker_ = std::make_unique<DataTracker>(
        "log: ", delayed_ack_timer_.get(), kPeerInitialTsn);
    data_tracker_->RestoreFromState(state);
    reasm_ =
        std::make_unique<ReassemblyQueue>("log: ", kPeerInitialTsn, kArwnd);
    reasm_->RestoreFromState(state);
    retransmission_queue_ = std::make_unique<RetransmissionQueue>(
        "", &callbacks_, kMyInitialTsn, kArwnd, producer_,
        [](DurationMs rtt_ms) {}, []() {}, *t3_rtx_timer_, DcSctpOptions(),
        /*supports_partial_reliability=*/true,
        /*use_message_interleaving=*/false);
    retransmission_queue_->RestoreFromState(state);
    handler_ = std::make_unique<StreamResetHandler>(
        "log: ", &ctx_, &timer_manager_, data_tracker_.get(), reasm_.get(),
        retransmission_queue_.get(), &state);
  }

  DataGenerator gen_;
  NiceMock<MockDcSctpSocketCallbacks> callbacks_;
  NiceMock<MockContext> ctx_;
  NiceMock<MockSendQueue> producer_;
  TimerManager timer_manager_;
  std::unique_ptr<Timer> delayed_ack_timer_;
  std::unique_ptr<Timer> t3_rtx_timer_;
  std::unique_ptr<DataTracker> data_tracker_;
  std::unique_ptr<ReassemblyQueue> reasm_;
  std::unique_ptr<RetransmissionQueue> retransmission_queue_;
  std::unique_ptr<StreamResetHandler> handler_;
};

TEST_F(StreamResetHandlerTest, ChunkWithNoParametersReturnsError) {
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  EXPECT_CALL(callbacks_, OnError).Times(1);
  handler_->HandleReConfig(ReConfigChunk(Parameters()));
}

TEST_F(StreamResetHandlerTest, ChunkWithInvalidParametersReturnsError) {
  Parameters::Builder builder;
  // Two OutgoingSSNResetRequestParameter in a RE-CONFIG is not valid.
  builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(1),
                                               ReconfigRequestSN(10),
                                               kPeerInitialTsn, {StreamID(1)}));
  builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(2),
                                               ReconfigRequestSN(10),
                                               kPeerInitialTsn, {StreamID(2)}));

  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  EXPECT_CALL(callbacks_, OnError).Times(1);
  handler_->HandleReConfig(ReConfigChunk(builder.Build()));
}

TEST_F(StreamResetHandlerTest, FailToDeliverWithoutResettingStream) {
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE"));
  reasm_->Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE"));

  data_tracker_->Observe(kPeerInitialTsn);
  data_tracker_->Observe(AddTo(kPeerInitialTsn, 1));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload),
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));

  gen_.ResetStream();
  reasm_->Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE"));
  EXPECT_THAT(reasm_->FlushMessages(), IsEmpty());
}

TEST_F(StreamResetHandlerTest, ResetStreamsNotDeferred) {
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE"));
  reasm_->Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE"));

  data_tracker_->Observe(kPeerInitialTsn);
  data_tracker_->Observe(AddTo(kPeerInitialTsn, 1));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload),
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));

  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1),
      {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  EXPECT_THAT(responses, SizeIs(1));
  EXPECT_EQ(responses[0].result(), ResponseResult::kSuccessPerformed);

  gen_.ResetStream();
  reasm_->Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE"));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));
}

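// The peer requests a stream reset before all of its outstanding data has
// been received, so the handler answers kInProgress and the reassembly queue
// defers delivery of data beyond the reset point until
// MaybeResetStreamsDeferred has observed the sender's last assigned TSN.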
TEST_F(StreamResetHandlerTest, ResetStreamsDeferred) {
  DataGeneratorOptions opts;
  opts.message_id = MID(0);
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE", opts));

  opts.message_id = MID(1);
  reasm_->Add(AddTo(kPeerInitialTsn, 1),
              gen_.Ordered({1, 2, 3, 4}, "BE", opts));

  data_tracker_->Observe(kPeerInitialTsn);
  data_tracker_->Observe(AddTo(kPeerInitialTsn, 1));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload),
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));

  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 3),
      {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  EXPECT_THAT(responses, SizeIs(1));
  EXPECT_EQ(responses[0].result(), ResponseResult::kInProgress);

  opts.message_id = MID(1);
  opts.ppid = PPID(5);
  reasm_->Add(AddTo(kPeerInitialTsn, 5),
              gen_.Ordered({1, 2, 3, 4}, "BE", opts));
  reasm_->MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 1));

  opts.message_id = MID(0);
  opts.ppid = PPID(4);
  reasm_->Add(AddTo(kPeerInitialTsn, 4),
              gen_.Ordered({1, 2, 3, 4}, "BE", opts));
  reasm_->MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 1));

  opts.message_id = MID(3);
  opts.ppid = PPID(3);
  reasm_->Add(AddTo(kPeerInitialTsn, 3),
              gen_.Ordered({1, 2, 3, 4}, "BE", opts));
  reasm_->MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 1));

  opts.message_id = MID(2);
  opts.ppid = PPID(2);
  reasm_->Add(AddTo(kPeerInitialTsn, 2),
              gen_.Ordered({1, 2, 3, 4}, "BE", opts));
  reasm_->MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 5));

  EXPECT_THAT(
      reasm_->FlushMessages(),
      UnorderedElementsAre(SctpMessageIs(StreamID(1), PPID(2), kShortPayload),
                           SctpMessageIs(StreamID(1), PPID(3), kShortPayload),
                           SctpMessageIs(StreamID(1), PPID(4), kShortPayload),
                           SctpMessageIs(StreamID(1), PPID(5), kShortPayload)));
}

TEST_F(StreamResetHandlerTest, SendOutgoingRequestDirectly) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  absl::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42)));
}

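// ResetStreams() may be called several times before a RE-CONFIG is actually
// produced; all prepared streams are then collected into a single outgoing
// request. StreamID(42) is requested twice below, which is why its
// PrepareResetStream expectation uses Times(2).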
TEST_F(StreamResetHandlerTest, ResetMultipleStreamsInOneRequest) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(40)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(41)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42))).Times(2);
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(44)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));
  handler_->ResetStreams(
      std::vector<StreamID>({StreamID(43), StreamID(44), StreamID(41)}));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42), StreamID(40)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(
          std::vector<StreamID>({StreamID(40), StreamID(41), StreamID(42),
                                 StreamID(43), StreamID(44)})));

  absl::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req.stream_ids(),
              UnorderedElementsAre(StreamID(40), StreamID(41), StreamID(42),
                                   StreamID(43), StreamID(44)));
}

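// MakeStreamResetRequest() only produces a RE-CONFIG once the send queue
// reports that the streams are ready to be reset (e.g. no partly sent
// message remains); until then the outgoing request is deferred.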
TEST_F(StreamResetHandlerTest, SendOutgoingRequestDeferred) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(true));

  EXPECT_FALSE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_FALSE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_TRUE(handler_->MakeStreamResetRequest().has_value());
}

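// A kSuccessPerformed response to the outgoing request must commit the
// prepared streams; the error-response case further below must roll them
// back instead.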
TEST_F(StreamResetHandlerTest, SendOutgoingResettingOnPositiveResponse) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  absl::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(
      req.request_sequence_number(), ResponseResult::kSuccessPerformed));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams);
  EXPECT_CALL(producer_, RollbackResetStreams).Times(0);

  // Processing a response shouldn't result in sending anything.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));
}

TEST_F(StreamResetHandlerTest, SendOutgoingResetRollbackOnError) {
|
dcsctp: Handle rapid closing of streams
When streams were to be reset, but there was already an ongoing
stream reset command in-flight, those streams wouldn't be properly
reset. When multiple streams were reset close to each other (within
an RTT), some streams would not have their SSNs reset, which resulted
in the stream resuming the SSN sequence. This could result in ordered
streams not delivering all messages as the receiver wouldn't deliver any
messages with SSN different from the expected SSN=0.
In WebRTC data channels, this would be triggered if multiple channels
were closed at roughly the same time, then re-opened, and continued
to be used in ordered mode. Unordered messages would still be delivered,
but the stream state could be wrong as the DATA_CHANNEL_ACK message is
sent ordered, and possibly not delivered.
There were unit tests for this, but not on the socket level using
real components, but just on the stream reset handler using mocks,
where this issue wasn't found. Also, those mocks didn't validate that
the correct parameters were provided, so that's fixed now.
The root cause was the PrepareResetStreams was only called if there
wasn't an ongoing stream reset operation in progress. One may try to
solve it by always calling PrepareResetStreams also when there is an
ongoing request, or to call it when the request has finished. One would
then realize that when the response of the outgoing stream request is
received, and CommitResetStreams is called, it would reset all paused
and (prepared) to-be-reset streams - not just the ones in the outgoing
stream request.
One cause of this was the lack of a single source of truth of the stream
states. The SendQueue kept track of which streams that were paused, but
the stream reset handler kept track of which streams that were
resetting. As that's error prone, this CL moves the source of truth
completely to the SendQueue and defining explicit stream pause states. A
stream can be in one of these possible states:
* Not paused. This is the default for an active stream.
* Pending to be paused. This is when it's about to be reset, but
there is a message that has been partly sent, with fragments
remaining to be sent before it can be paused.
* Paused, with no partly sent message. In this state, it's ready to
be reset.
* Resetting. A stream transitions into this state when it has been
paused and has been included in an outgoing stream reset request.
When this request has been responded to, the stream can really be
reset (SSN=0, MID=0).
This CL also improves logging, and adds socket tests to catch this
issue.
Bug: webrtc:13994, chromium:1320194
Change-Id: I883570d1f277bc01e52b1afad62d6be2aca930a2
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/261180
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Victor Boivie <boivie@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#36771}
2022-05-02 17:15:57 +02:00
|
|
|
EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
|
2021-09-16 07:27:47 +02:00
|
|
|
handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));
|
2021-04-06 20:47:11 +02:00
|
|
|
|
dcsctp: Handle rapid closing of streams
When streams were to be reset, but there was already an ongoing
stream reset command in-flight, those streams wouldn't be properly
reset. When multiple streams were reset close to each other (within
an RTT), some streams would not have their SSNs reset, which resulted
in the stream resuming the SSN sequence. This could result in ordered
streams not delivering all messages as the receiver wouldn't deliver any
messages with SSN different from the expected SSN=0.
In WebRTC data channels, this would be triggered if multiple channels
were closed at roughly the same time, then re-opened, and continued
to be used in ordered mode. Unordered messages would still be delivered,
but the stream state could be wrong as the DATA_CHANNEL_ACK message is
sent ordered, and possibly not delivered.
There were unit tests for this, but not on the socket level using
real components, but just on the stream reset handler using mocks,
where this issue wasn't found. Also, those mocks didn't validate that
the correct parameters were provided, so that's fixed now.
The root cause was the PrepareResetStreams was only called if there
wasn't an ongoing stream reset operation in progress. One may try to
solve it by always calling PrepareResetStreams also when there is an
ongoing request, or to call it when the request has finished. One would
then realize that when the response of the outgoing stream request is
received, and CommitResetStreams is called, it would reset all paused
and (prepared) to-be-reset streams - not just the ones in the outgoing
stream request.
One cause of this was the lack of a single source of truth of the stream
states. The SendQueue kept track of which streams that were paused, but
the stream reset handler kept track of which streams that were
resetting. As that's error prone, this CL moves the source of truth
completely to the SendQueue and defining explicit stream pause states. A
stream can be in one of these possible states:
* Not paused. This is the default for an active stream.
* Pending to be paused. This is when it's about to be reset, but
there is a message that has been partly sent, with fragments
remaining to be sent before it can be paused.
* Paused, with no partly sent message. In this state, it's ready to
be reset.
* Resetting. A stream transitions into this state when it has been
paused and has been included in an outgoing stream reset request.
When this request has been responded to, the stream can really be
reset (SSN=0, MID=0).
This CL also improves logging, and adds socket tests to catch this
issue.
Bug: webrtc:13994, chromium:1320194
Change-Id: I883570d1f277bc01e52b1afad62d6be2aca930a2
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/261180
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Victor Boivie <boivie@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#36771}
2022-05-02 17:15:57 +02:00
|
|
|
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  absl::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(
      req.request_sequence_number(), ResponseResult::kErrorBadSequenceNumber));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams).Times(0);
  EXPECT_CALL(producer_, RollbackResetStreams);

  // Only requests should result in sending responses.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));
}
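// Verifies that when the peer answers an outgoing reset request with
// "In Progress", the handler retries the same set of streams in a new
// request, with an incremented request sequence number, once the RECONFIG
// timer (RTO) expires.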
TEST_F(StreamResetHandlerTest, SendOutgoingResetRetransmitOnInProgress) {
  static constexpr StreamID kStreamToReset = StreamID(42);
  EXPECT_CALL(producer_, PrepareResetStream(kStreamToReset));
  handler_->ResetStreams(std::vector<StreamID>({kStreamToReset}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({kStreamToReset})));

  absl::optional<ReConfigChunk> reconfig1 = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig1.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req1,
      reconfig1->parameters().get<OutgoingSSNResetRequestParameter>());

  // Simulate that the peer responded "In Progress".
  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(req1.request_sequence_number(),
                                               ResponseResult::kInProgress));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams()).Times(0);
  EXPECT_CALL(producer_, RollbackResetStreams()).Times(0);

  // Processing a response shouldn't result in sending anything.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));

  // Let some time pass, so that the reconfig timer expires, and retries the
  // same request.
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(1);
  AdvanceTime(kRto);

  std::vector<uint8_t> payload = callbacks_.ConsumeSentPacket();
  ASSERT_FALSE(payload.empty());

  ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(payload));
  ASSERT_THAT(packet.descriptors(), SizeIs(1));
  ASSERT_HAS_VALUE_AND_ASSIGN(
      ReConfigChunk reconfig2,
      ReConfigChunk::Parse(packet.descriptors()[0].data));

  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req2,
      reconfig2.parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req2.request_sequence_number(),
            AddTo(req1.request_sequence_number(), 1));
  EXPECT_THAT(req2.stream_ids(), UnorderedElementsAre(kStreamToReset));
}
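// Verifies that streams reset while an outgoing reset request is still
// in-flight are queued, and that a follow-up request covering exactly those
// queued streams is generated once the first request has been responded to.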
TEST_F(StreamResetHandlerTest, ResetWhileRequestIsSentWillQueue) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  absl::optional<ReConfigChunk> reconfig1 = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig1.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req1,
      reconfig1->parameters().get<OutgoingSSNResetRequestParameter>());
  EXPECT_EQ(req1.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req1.sender_last_assigned_tsn(),
            AddTo(retransmission_queue_->next_tsn(), -1));
  EXPECT_THAT(req1.stream_ids(), UnorderedElementsAre(StreamID(42)));

  // Streams reset while the request is in-flight will be queued.
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(41)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
  StreamID stream_ids[] = {StreamID(41), StreamID(43)};
  handler_->ResetStreams(stream_ids);
  EXPECT_EQ(handler_->MakeStreamResetRequest(), absl::nullopt);

  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(
      req1.request_sequence_number(), ResponseResult::kSuccessPerformed));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
  EXPECT_CALL(producer_, RollbackResetStreams()).Times(0);

  // Processing a response shouldn't result in sending anything.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));

  // Response has been processed. A new request can be sent.
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(41), StreamID(43)})));

  absl::optional<ReConfigChunk> reconfig2 = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig2.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req2,
      reconfig2->parameters().get<OutgoingSSNResetRequestParameter>());
  EXPECT_EQ(req2.request_sequence_number(), AddTo(kMyInitialReqSn, 1));
  EXPECT_EQ(req2.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req2.stream_ids(),
              UnorderedElementsAre(StreamID(41), StreamID(43)));
}
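// Verifies that a received Incoming SSN Reset Request is answered with a
// single response carrying the peer's request sequence number and the result
// "Success - nothing to do".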
TEST_F(StreamResetHandlerTest, SendIncomingResetJustReturnsNothingPerformed) {
  Parameters::Builder builder;
  builder.Add(
      IncomingSSNResetRequestParameter(kPeerInitialReqSn, {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  ASSERT_THAT(responses, SizeIs(1));
  EXPECT_THAT(responses[0].response_sequence_number(), kPeerInitialReqSn);
  EXPECT_THAT(responses[0].result(), ResponseResult::kSuccessNothingToDo);
}
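// Verifies that a retransmitted (identical) outgoing reset request from the
// peer is answered with "Success - performed" the first time and
// "Success - nothing to do" the second time, as the reset has already been
// applied.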
TEST_F(StreamResetHandlerTest, SendSameRequestTwiceReturnsNothingToDo) {
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE"));
  reasm_->Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE"));

  data_tracker_->Observe(kPeerInitialTsn);
  data_tracker_->Observe(AddTo(kPeerInitialTsn, 1));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload),
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));

  Parameters::Builder builder1;
  builder1.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1),
      {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses1 =
      HandleAndCatchResponse(ReConfigChunk(builder1.Build()));
  EXPECT_THAT(responses1, SizeIs(1));
  EXPECT_EQ(responses1[0].result(), ResponseResult::kSuccessPerformed);

  Parameters::Builder builder2;
  builder2.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1),
      {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses2 =
      HandleAndCatchResponse(ReConfigChunk(builder2.Build()));
  EXPECT_THAT(responses2, SizeIs(1));
  EXPECT_EQ(responses2[0].result(), ResponseResult::kSuccessNothingToDo);
}
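// Verifies that handover readiness reflects pending stream resets: handover
// is blocked while streams are waiting to be reset or while an outgoing reset
// request is in-flight, and only becomes allowed once both requests have been
// responded to and no streams remain to be reset.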
TEST_F(StreamResetHandlerTest,
       HandoverIsAllowedOnlyWhenNoStreamIsBeingOrWillBeReset) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_EQ(
      handler_->GetHandoverReadiness(),
      HandoverReadinessStatus(HandoverUnreadinessReason::kPendingStreamReset));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  ASSERT_TRUE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_EQ(handler_->GetHandoverReadiness(),
            HandoverReadinessStatus(
                HandoverUnreadinessReason::kPendingStreamResetRequest));

  // Reset more streams while the request is in-flight.
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(41)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
  StreamID stream_ids[] = {StreamID(41), StreamID(43)};
  handler_->ResetStreams(stream_ids);

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_EQ(handler_->GetHandoverReadiness(),
            HandoverReadinessStatus()
                .Add(HandoverUnreadinessReason::kPendingStreamResetRequest)
                .Add(HandoverUnreadinessReason::kPendingStreamReset));

  // Processing a response to the first request.
  EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
  handler_->HandleReConfig(
      ReConfigChunk(Parameters::Builder()
                        .Add(ReconfigurationResponseParameter(
                            kMyInitialReqSn, ResponseResult::kSuccessPerformed))
                        .Build()));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_EQ(
      handler_->GetHandoverReadiness(),
      HandoverReadinessStatus(HandoverUnreadinessReason::kPendingStreamReset));

  // Second request can be sent.
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(41), StreamID(43)})));

  ASSERT_TRUE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_EQ(handler_->GetHandoverReadiness(),
            HandoverReadinessStatus(
                HandoverUnreadinessReason::kPendingStreamResetRequest));

  // Processing a response to the second request.
  EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
  handler_->HandleReConfig(ReConfigChunk(
      Parameters::Builder()
          .Add(ReconfigurationResponseParameter(
              AddTo(kMyInitialReqSn, 1), ResponseResult::kSuccessPerformed))
          .Build()));

  // Second response has been processed. No pending resets.
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(false));

  EXPECT_TRUE(handler_->GetHandoverReadiness().IsReady());
}
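// Verifies that a handler restored from a handover performed in the initial
// state still accepts stream reset requests afterwards.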
TEST_F(StreamResetHandlerTest, HandoverInInitialState) {
  PerformHandover();

  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));
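
  // The request produced right after the handover should use the initial
  // request sequence number and cover exactly stream 42.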
  absl::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42)));
}

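// Resets one stream to completion, performs a handover, and verifies that the
// next reset request continues the request sequence numbering from the
// handed-over state.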
TEST_F(StreamResetHandlerTest, HandoverAfterHavingResetOneStream) {
  // Reset one stream
  {
    EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
    handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

    EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
        .WillOnce(Return(true))
        .WillOnce(Return(false));
    EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
        .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

    ASSERT_HAS_VALUE_AND_ASSIGN(ReConfigChunk reconfig,
                                handler_->MakeStreamResetRequest());
    ASSERT_HAS_VALUE_AND_ASSIGN(
        OutgoingSSNResetRequestParameter req,
        reconfig.parameters().get<OutgoingSSNResetRequestParameter>());
    EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
    EXPECT_EQ(req.sender_last_assigned_tsn(),
              TSN(*retransmission_queue_->next_tsn() - 1));
    EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42)));

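    // Respond to the outgoing request, which commits the reset of stream 42.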
    EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
    handler_->HandleReConfig(
        ReConfigChunk(Parameters::Builder()
                          .Add(ReconfigurationResponseParameter(
                              req.request_sequence_number(),
                              ResponseResult::kSuccessPerformed))
                          .Build()));
  }

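  // The first reset round-trip is complete; hand the handler state over
  // before resetting the next stream.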
  PerformHandover();

  // Reset another stream after handover
  {
    EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
    handler_->ResetStreams(std::vector<StreamID>({StreamID(43)}));

    EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
    EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
        .WillOnce(Return(std::vector<StreamID>({StreamID(43)})));

    ASSERT_HAS_VALUE_AND_ASSIGN(ReConfigChunk reconfig,
                                handler_->MakeStreamResetRequest());
    ASSERT_HAS_VALUE_AND_ASSIGN(
        OutgoingSSNResetRequestParameter req,
        reconfig.parameters().get<OutgoingSSNResetRequestParameter>());

    EXPECT_EQ(req.request_sequence_number(),
              ReconfigRequestSN(kMyInitialReqSn.value() + 1));
    EXPECT_EQ(req.sender_last_assigned_tsn(),
              TSN(*retransmission_queue_->next_tsn() - 1));
    EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(43)));
  }
}

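// An incoming reset request that refers to a TSN that hasn't been received
// yet is first answered with "In progress". A retry with an incremented
// request sequence number, sent after the TSN has arrived, is then performed.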
TEST_F(StreamResetHandlerTest, PerformCloseAfterOneFirstFailing) {
  // Inject a stream reset on the first expected TSN (which hasn't been seen).
  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), kPeerInitialTsn, {StreamID(1)}));

  // The socket is expected to say "in progress" as that TSN hasn't been seen.
  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  EXPECT_THAT(responses, SizeIs(1));
  EXPECT_EQ(responses[0].result(), ResponseResult::kInProgress);

  // Let the socket receive the TSN.
  DataGeneratorOptions opts;
  opts.message_id = MID(0);
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE", opts));
  reasm_->MaybeResetStreamsDeferred(kPeerInitialTsn);
  data_tracker_->Observe(kPeerInitialTsn);

  // And emulate that time has passed, and the peer retries the stream reset,
  // but now with an incremented request sequence number.
  Parameters::Builder builder2;
  builder2.Add(OutgoingSSNResetRequestParameter(
      ReconfigRequestSN(*kPeerInitialReqSn + 1), ReconfigRequestSN(3),
      kPeerInitialTsn, {StreamID(1)}));

  // This is supposed to be handled well.
  std::vector<ReconfigurationResponseParameter> responses2 =
      HandleAndCatchResponse(ReConfigChunk(builder2.Build()));
  EXPECT_THAT(responses2, SizeIs(1));
  EXPECT_EQ(responses2[0].result(), ResponseResult::kSuccessPerformed);
}

}  // namespace
}  // namespace dcsctp