/*
 * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "net/dcsctp/socket/stream_reset_handler.h"

#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <type_traits>
#include <vector>

#include "api/array_view.h"
#include "api/task_queue/task_queue_base.h"
#include "net/dcsctp/common/handover_testing.h"
#include "net/dcsctp/common/internal_types.h"
#include "net/dcsctp/packet/chunk/forward_tsn_common.h"
#include "net/dcsctp/packet/chunk/reconfig_chunk.h"
#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h"
#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h"
#include "net/dcsctp/packet/parameter/parameter.h"
#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h"
#include "net/dcsctp/packet/sctp_packet.h"
#include "net/dcsctp/public/dcsctp_handover_state.h"
#include "net/dcsctp/public/dcsctp_message.h"
#include "net/dcsctp/public/types.h"
#include "net/dcsctp/rx/data_tracker.h"
#include "net/dcsctp/rx/reassembly_queue.h"
#include "net/dcsctp/socket/mock_context.h"
#include "net/dcsctp/socket/mock_dcsctp_socket_callbacks.h"
#include "net/dcsctp/testing/data_generator.h"
#include "net/dcsctp/testing/testing_macros.h"
#include "net/dcsctp/timer/timer.h"
#include "net/dcsctp/tx/mock_send_queue.h"
#include "net/dcsctp/tx/retransmission_queue.h"
#include "rtc_base/gunit.h"
#include "test/gmock.h"

namespace dcsctp {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::NiceMock;
using ::testing::Property;
using ::testing::Return;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::webrtc::TimeDelta;
using ResponseResult = ReconfigurationResponseParameter::Result;
using SkippedStream = AnyForwardTsnChunk::SkippedStream;

constexpr TSN kMyInitialTsn = MockContext::MyInitialTsn();
constexpr ReconfigRequestSN kMyInitialReqSn = ReconfigRequestSN(*kMyInitialTsn);
constexpr TSN kPeerInitialTsn = MockContext::PeerInitialTsn();
constexpr ReconfigRequestSN kPeerInitialReqSn =
    ReconfigRequestSN(*kPeerInitialTsn);
constexpr uint32_t kArwnd = 131072;
constexpr TimeDelta kRto = TimeDelta::Millis(250);

constexpr std::array<uint8_t, 4> kShortPayload = {1, 2, 3, 4};
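
// Matches a DcSctpMessage against an expected stream id, PPID and payload.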
MATCHER_P3(SctpMessageIs, stream_id, ppid, expected_payload, "") {
  if (arg.stream_id() != stream_id) {
    *result_listener << "the stream_id is " << *arg.stream_id();
    return false;
  }

  if (arg.ppid() != ppid) {
    *result_listener << "the ppid is " << *arg.ppid();
    return false;
  }

  if (std::vector<uint8_t>(arg.payload().begin(), arg.payload().end()) !=
      std::vector<uint8_t>(expected_payload.begin(), expected_payload.end())) {
    *result_listener << "the payload is wrong";
    return false;
  }
  return true;
}
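
// Helpers returning a TSN or request sequence number offset by `delta`.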
TSN AddTo(TSN tsn, int delta) {
  return TSN(*tsn + delta);
}

ReconfigRequestSN AddTo(ReconfigRequestSN req_sn, int delta) {
  return ReconfigRequestSN(*req_sn + delta);
}
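
// Test fixture wiring a StreamResetHandler to a real DataTracker,
// ReassemblyQueue and RetransmissionQueue, backed by a mocked context,
// mocked socket callbacks and a mocked send queue.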
class StreamResetHandlerTest : public testing::Test {
 protected:
  StreamResetHandlerTest()
      : ctx_(&callbacks_),
        timer_manager_([this](webrtc::TaskQueueBase::DelayPrecision precision) {
          return callbacks_.CreateTimeout(precision);
        }),
        delayed_ack_timer_(timer_manager_.CreateTimer(
            "test/delayed_ack",
            []() { return TimeDelta::Zero(); },
            TimerOptions(TimeDelta::Zero()))),
        t3_rtx_timer_(timer_manager_.CreateTimer(
            "test/t3_rtx",
            []() { return TimeDelta::Zero(); },
            TimerOptions(TimeDelta::Zero()))),
        data_tracker_(std::make_unique<DataTracker>("log: ",
                                                    delayed_ack_timer_.get(),
                                                    kPeerInitialTsn)),
        reasm_(std::make_unique<ReassemblyQueue>("log: ", kArwnd)),
        retransmission_queue_(std::make_unique<RetransmissionQueue>(
            "",
            &callbacks_,
            kMyInitialTsn,
            kArwnd,
            producer_,
            [](TimeDelta /* rtt */) {},
            []() {},
            *t3_rtx_timer_,
            DcSctpOptions())),
        handler_(
            std::make_unique<StreamResetHandler>("log: ",
                                                 &ctx_,
                                                 &timer_manager_,
                                                 data_tracker_.get(),
                                                 reasm_.get(),
                                                 retransmission_queue_.get())) {
    EXPECT_CALL(ctx_, current_rto).WillRepeatedly(Return(kRto));
  }
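
  // Advances the simulated time and handles any timeouts that expired,
  // one at a time.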
  void AdvanceTime(TimeDelta duration) {
    callbacks_.AdvanceTime(duration);
    for (;;) {
      std::optional<TimeoutID> timeout_id = callbacks_.GetNextExpiredTimeout();
      if (!timeout_id.has_value()) {
        break;
      }
      timer_manager_.HandleTimeout(*timeout_id);
    }
  }

  // Handles the passed in RE-CONFIG `chunk` and returns the responses
  // that are sent in the response RE-CONFIG.
  std::vector<ReconfigurationResponseParameter> HandleAndCatchResponse(
      ReConfigChunk chunk) {
    handler_->HandleReConfig(std::move(chunk));

    std::vector<uint8_t> payload = callbacks_.ConsumeSentPacket();
    if (payload.empty()) {
      EXPECT_TRUE(false);
      return {};
    }

    std::vector<ReconfigurationResponseParameter> responses;
    std::optional<SctpPacket> p = SctpPacket::Parse(payload, DcSctpOptions());
    if (!p.has_value()) {
      EXPECT_TRUE(false);
      return {};
    }
    if (p->descriptors().size() != 1) {
      EXPECT_TRUE(false);
      return {};
    }
    std::optional<ReConfigChunk> response_chunk =
        ReConfigChunk::Parse(p->descriptors()[0].data);
    if (!response_chunk.has_value()) {
      EXPECT_TRUE(false);
      return {};
    }
    for (const auto& desc : response_chunk->parameters().descriptors()) {
      if (desc.type == ReconfigurationResponseParameter::kType) {
        std::optional<ReconfigurationResponseParameter> response =
            ReconfigurationResponseParameter::Parse(desc.data);
        if (!response.has_value()) {
          EXPECT_TRUE(false);
          return {};
        }
        responses.emplace_back(*std::move(response));
      }
    }
    return responses;
  }
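
  // Verifies that all components are ready for handover, captures their
  // handover state and re-creates them from that state.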
  void PerformHandover() {
    EXPECT_TRUE(handler_->GetHandoverReadiness().IsReady());
    EXPECT_TRUE(data_tracker_->GetHandoverReadiness().IsReady());
    EXPECT_TRUE(reasm_->GetHandoverReadiness().IsReady());
    EXPECT_TRUE(retransmission_queue_->GetHandoverReadiness().IsReady());

    DcSctpSocketHandoverState state;
    handler_->AddHandoverState(state);
    data_tracker_->AddHandoverState(state);
    reasm_->AddHandoverState(state);
    retransmission_queue_->AddHandoverState(state);

    g_handover_state_transformer_for_test(&state);

    data_tracker_ = std::make_unique<DataTracker>(
        "log: ", delayed_ack_timer_.get(), kPeerInitialTsn);
    data_tracker_->RestoreFromState(state);
    reasm_ = std::make_unique<ReassemblyQueue>("log: ", kArwnd);
    reasm_->RestoreFromState(state);
    retransmission_queue_ = std::make_unique<RetransmissionQueue>(
        "", &callbacks_, kMyInitialTsn, kArwnd, producer_,
        [](TimeDelta /* rtt */) {}, []() {}, *t3_rtx_timer_, DcSctpOptions(),
        /*supports_partial_reliability=*/true,
        /*use_message_interleaving=*/false);
    retransmission_queue_->RestoreFromState(state);
    handler_ = std::make_unique<StreamResetHandler>(
        "log: ", &ctx_, &timer_manager_, data_tracker_.get(), reasm_.get(),
        retransmission_queue_.get(), &state);
  }

  DataGenerator gen_;
  NiceMock<MockDcSctpSocketCallbacks> callbacks_;
  NiceMock<MockContext> ctx_;
  NiceMock<MockSendQueue> producer_;
  TimerManager timer_manager_;
  std::unique_ptr<Timer> delayed_ack_timer_;
  std::unique_ptr<Timer> t3_rtx_timer_;
  std::unique_ptr<DataTracker> data_tracker_;
  std::unique_ptr<ReassemblyQueue> reasm_;
  std::unique_ptr<RetransmissionQueue> retransmission_queue_;
  std::unique_ptr<StreamResetHandler> handler_;
};
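
// A RE-CONFIG chunk without any parameters is invalid; the handler should
// report an error and not send any response.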
TEST_F(StreamResetHandlerTest, ChunkWithNoParametersReturnsError) {
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  EXPECT_CALL(callbacks_, OnError).Times(1);
  handler_->HandleReConfig(ReConfigChunk(Parameters()));
}

TEST_F(StreamResetHandlerTest, ChunkWithInvalidParametersReturnsError) {
  Parameters::Builder builder;
  // Two OutgoingSSNResetRequestParameter in a RE-CONFIG is not valid.
  builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(1),
                                               ReconfigRequestSN(10),
                                               kPeerInitialTsn, {StreamID(1)}));
  builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(2),
                                               ReconfigRequestSN(10),
                                               kPeerInitialTsn, {StreamID(2)}));

  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  EXPECT_CALL(callbacks_, OnError).Times(1);
  handler_->HandleReConfig(ReConfigChunk(builder.Build()));
}
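
// Without a stream reset, a message generated after the data generator has
// restarted its SSNs must not be delivered.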
TEST_F(StreamResetHandlerTest, FailToDeliverWithoutResettingStream) {
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE"));
  reasm_->Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE"));

  data_tracker_->Observe(kPeerInitialTsn);
  data_tracker_->Observe(AddTo(kPeerInitialTsn, 1));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload),
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));

  gen_.ResetStream();
  reasm_->Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE"));
  EXPECT_THAT(reasm_->FlushMessages(), IsEmpty());
}
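
// When the sender's last assigned TSN has already been received, an incoming
// reset request is performed immediately and later messages are delivered.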
TEST_F(StreamResetHandlerTest, ResetStreamsNotDeferred) {
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE"));
  reasm_->Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE"));

  data_tracker_->Observe(kPeerInitialTsn);
  data_tracker_->Observe(AddTo(kPeerInitialTsn, 1));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload),
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));

  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1),
      {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  EXPECT_THAT(responses, SizeIs(1));
  EXPECT_EQ(responses[0].result(), ResponseResult::kSuccessPerformed);

  gen_.ResetStream();
  reasm_->Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE"));
  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(53), kShortPayload)));
}
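
// When the sender's last assigned TSN hasn't been received yet, the request is
// answered with "in progress" and chunks past that TSN are deferred until a
// retried request can be performed.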

TEST_F(StreamResetHandlerTest, ResetStreamsDeferred) {
  constexpr StreamID kStreamId = StreamID(1);
  data_tracker_->Observe(TSN(10));
  reasm_->Add(TSN(10), gen_.Ordered({1, 2, 3, 4}, "BE", {.mid = MID(0)}));

  data_tracker_->Observe(TSN(11));
  reasm_->Add(TSN(11), gen_.Ordered({1, 2, 3, 4}, "BE", {.mid = MID(1)}));

  EXPECT_THAT(
      reasm_->FlushMessages(),
      UnorderedElementsAre(SctpMessageIs(kStreamId, PPID(53), kShortPayload),
                           SctpMessageIs(kStreamId, PPID(53), kShortPayload)));

  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      ReconfigRequestSN(10), ReconfigRequestSN(3), TSN(13), {kStreamId}));
  EXPECT_THAT(HandleAndCatchResponse(ReConfigChunk(builder.Build())),
              ElementsAre(Property(&ReconfigurationResponseParameter::result,
                                   ResponseResult::kInProgress)));

  data_tracker_->Observe(TSN(15));
  reasm_->Add(TSN(15), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.mid = MID(1), .ppid = PPID(5)}));

  data_tracker_->Observe(TSN(14));
  reasm_->Add(TSN(14), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.mid = MID(0), .ppid = PPID(4)}));

  data_tracker_->Observe(TSN(13));
  reasm_->Add(TSN(13), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.mid = MID(3), .ppid = PPID(3)}));

  data_tracker_->Observe(TSN(12));
  reasm_->Add(TSN(12), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.mid = MID(2), .ppid = PPID(2)}));

  builder.Add(OutgoingSSNResetRequestParameter(
      ReconfigRequestSN(11), ReconfigRequestSN(4), TSN(13), {kStreamId}));
  EXPECT_THAT(HandleAndCatchResponse(ReConfigChunk(builder.Build())),
              ElementsAre(Property(&ReconfigurationResponseParameter::result,
                                   ResponseResult::kSuccessPerformed)));

  EXPECT_THAT(
      reasm_->FlushMessages(),
      UnorderedElementsAre(SctpMessageIs(kStreamId, PPID(2), kShortPayload),
                           SctpMessageIs(kStreamId, PPID(3), kShortPayload),
                           SctpMessageIs(kStreamId, PPID(4), kShortPayload),
                           SctpMessageIs(kStreamId, PPID(5), kShortPayload)));
}

TEST_F(StreamResetHandlerTest, ResetStreamsDeferredOnlySelectedStreams) {
  // This test verifies the behavior when receiving messages on streams 1, 2
  // and 3, and a reset request for streams 1 and 2, which causes deferred
  // reset processing.

  // Reset streams 1 and 2 with "last assigned TSN=12".
  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(10),
                                               ReconfigRequestSN(3), TSN(12),
                                               {StreamID(1), StreamID(2)}));
  EXPECT_THAT(HandleAndCatchResponse(ReConfigChunk(builder.Build())),
              ElementsAre(Property(&ReconfigurationResponseParameter::result,
                                   ResponseResult::kInProgress)));

  // TSN 10, SID 1 - before TSN 12 -> deliver
  data_tracker_->Observe(TSN(10));
  reasm_->Add(TSN(10), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.stream_id = StreamID(1),
                                     .mid = MID(0),
                                     .ppid = PPID(1001)}));

  // TSN 11, SID 2 - before TSN 12 -> deliver
  data_tracker_->Observe(TSN(11));
  reasm_->Add(TSN(11), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.stream_id = StreamID(2),
                                     .mid = MID(0),
                                     .ppid = PPID(1002)}));

  // TSN 12, SID 3 - at TSN 12 -> deliver
  data_tracker_->Observe(TSN(12));
  reasm_->Add(TSN(12), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.stream_id = StreamID(3),
                                     .mid = MID(0),
                                     .ppid = PPID(1003)}));

  // TSN 13, SID 1 - after TSN 12 and SID=1 -> defer
  data_tracker_->Observe(TSN(13));
  reasm_->Add(TSN(13), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.stream_id = StreamID(1),
                                     .mid = MID(0),
                                     .ppid = PPID(1004)}));

  // TSN 14, SID 2 - after TSN 12 and SID=2 -> defer
  data_tracker_->Observe(TSN(14));
  reasm_->Add(TSN(14), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.stream_id = StreamID(2),
                                     .mid = MID(0),
                                     .ppid = PPID(1005)}));

  // TSN 15, SID 3 - after TSN 12, but SID 3 is not reset -> deliver
  data_tracker_->Observe(TSN(15));
  reasm_->Add(TSN(15), gen_.Ordered({1, 2, 3, 4}, "BE",
                                    {.stream_id = StreamID(3),
                                     .mid = MID(1),
                                     .ppid = PPID(1006)}));

  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(1001), kShortPayload),
                  SctpMessageIs(StreamID(2), PPID(1002), kShortPayload),
                  SctpMessageIs(StreamID(3), PPID(1003), kShortPayload),
                  SctpMessageIs(StreamID(3), PPID(1006), kShortPayload)));

  builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(11),
                                               ReconfigRequestSN(3), TSN(13),
                                               {StreamID(1), StreamID(2)}));
  EXPECT_THAT(HandleAndCatchResponse(ReConfigChunk(builder.Build())),
              ElementsAre(Property(&ReconfigurationResponseParameter::result,
                                   ResponseResult::kSuccessPerformed)));

  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(StreamID(1), PPID(1004), kShortPayload),
                  SctpMessageIs(StreamID(2), PPID(1005), kShortPayload)));
}

TEST_F(StreamResetHandlerTest, ResetStreamsDefersForwardTsn) {
  // This test verifies that FORWARD-TSNs are deferred if they want to move
  // the cumulative ack TSN point past the sender's last assigned TSN.
  static constexpr StreamID kStreamId = StreamID(42);

  // Simulate that the sender sends:
  //  * TSN 10 (SSN=0, BE, lost)
  //  * TSN 11 (SSN=1, BE, lost)
  //  * TSN 12 (SSN=2, BE, lost)
  //  * RESET THE STREAM
  //  * TSN 13 (SSN=0, B, received)
  //  * TSN 14 (SSN=0, E, lost)
  //  * TSN 15 (SSN=1, BE, received)
  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      ReconfigRequestSN(10), ReconfigRequestSN(3), TSN(12), {kStreamId}));
  EXPECT_THAT(HandleAndCatchResponse(ReConfigChunk(builder.Build())),
              ElementsAre(Property(&ReconfigurationResponseParameter::result,
                                   ResponseResult::kInProgress)));

  // TSN 13, B, after TSN=12 -> defer
  data_tracker_->Observe(TSN(13));
  reasm_->Add(TSN(13),
              gen_.Ordered(
                  {1, 2, 3, 4}, "B",
                  {.stream_id = kStreamId, .mid = MID(0), .ppid = PPID(1004)}));

  // TSN 15, BE, after TSN=12 -> defer
  data_tracker_->Observe(TSN(15));
  reasm_->Add(TSN(15),
              gen_.Ordered(
                  {1, 2, 3, 4}, "BE",
                  {.stream_id = kStreamId, .mid = MID(1), .ppid = PPID(1005)}));

  // Time passes, and the sender decides to send a FORWARD-TSN up to the RESET.
  data_tracker_->HandleForwardTsn(TSN(12));
  reasm_->HandleForwardTsn(
      TSN(12), std::vector<SkippedStream>({SkippedStream(kStreamId, SSN(2))}));

  // The receiver sends a SACK in response to that. The stream hasn't been
  // reset yet, but the sender now decides that TSN=13-14 are to be skipped.
  // As this has TSN 14, which is after TSN=12 -> defer it.
  data_tracker_->HandleForwardTsn(TSN(14));
  reasm_->HandleForwardTsn(
      TSN(14), std::vector<SkippedStream>({SkippedStream(kStreamId, SSN(0))}));

  // Reset the stream -> deferred TSNs should be re-added.
  builder.Add(OutgoingSSNResetRequestParameter(
      ReconfigRequestSN(11), ReconfigRequestSN(3), TSN(12), {kStreamId}));
  EXPECT_THAT(HandleAndCatchResponse(ReConfigChunk(builder.Build())),
              ElementsAre(Property(&ReconfigurationResponseParameter::result,
                                   ResponseResult::kSuccessPerformed)));

  EXPECT_THAT(reasm_->FlushMessages(),
              UnorderedElementsAre(
                  SctpMessageIs(kStreamId, PPID(1005), kShortPayload)));
}
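
// Locally initiated reset: the stream is first prepared for reset in the send
// queue, and the next outgoing RE-CONFIG contains an outgoing SSN reset
// request covering it.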
TEST_F(StreamResetHandlerTest, SendOutgoingRequestDirectly) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  std::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42)));
}
|
|
|
|
|
|
|
|
|
|
TEST_F(StreamResetHandlerTest, ResetMultipleStreamsInOneRequest) {
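  // Resetting overlapping sets of streams in several calls should still be
  // bundled into a single outgoing request covering every prepared stream.
  // StreamID(42) is requested twice, so PrepareResetStream is expected twice.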
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(40)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(41)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42))).Times(2);
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(44)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));
  handler_->ResetStreams(
      std::vector<StreamID>({StreamID(43), StreamID(44), StreamID(41)}));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42), StreamID(40)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(
          std::vector<StreamID>({StreamID(40), StreamID(41), StreamID(42),
                                 StreamID(43), StreamID(44)})));

  std::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req.stream_ids(),
              UnorderedElementsAre(StreamID(40), StreamID(41), StreamID(42),
                                   StreamID(43), StreamID(44)));
}

TEST_F(StreamResetHandlerTest, SendOutgoingRequestDeferred) {
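  // If the producer reports that no streams are ready to be reset yet,
  // MakeStreamResetRequest returns nothing; a request is only produced once
  // streams become ready.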
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(true));

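  // The first two attempts find no streams ready and produce no request; the
  // third attempt does.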
  EXPECT_FALSE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_FALSE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_TRUE(handler_->MakeStreamResetRequest().has_value());
}

TEST_F(StreamResetHandlerTest, SendOutgoingResettingOnPositiveResponse) {
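  // A kSuccessPerformed response to an outgoing reset request should commit
  // the reset; processing the response must not send anything or report an
  // error.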
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  std::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

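  // Fake the peer's reply: a RECONFIG carrying a response parameter that
  // references our request sequence number and reports kSuccessPerformed.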
  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(
      req.request_sequence_number(), ResponseResult::kSuccessPerformed));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams);
  EXPECT_CALL(producer_, RollbackResetStreams).Times(0);

  // Processing a response shouldn't result in sending anything.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));
}

TEST_F(StreamResetHandlerTest, SendOutgoingResetRollbackOnError) {
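  // If the peer rejects the outgoing reset request (here with
  // kErrorBadSequenceNumber), the prepared streams should be rolled back
  // instead of committed.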
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  std::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

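  // Fake an error response from the peer that references our request
  // sequence number.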
  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(
      req.request_sequence_number(), ResponseResult::kErrorBadSequenceNumber));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams).Times(0);
  EXPECT_CALL(producer_, RollbackResetStreams);

  // Only requests should result in sending responses.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));
}

TEST_F(StreamResetHandlerTest, SendOutgoingResetRetransmitOnInProgress) {
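  // If the peer answers kInProgress, nothing is committed or rolled back;
  // when the reconfig timer expires, the same streams are sent again in a new
  // request with the next request sequence number.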
  static constexpr StreamID kStreamToReset = StreamID(42);

  EXPECT_CALL(producer_, PrepareResetStream(kStreamToReset));
  handler_->ResetStreams(std::vector<StreamID>({kStreamToReset}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({kStreamToReset})));

  std::optional<ReConfigChunk> reconfig1 = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig1.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req1,
      reconfig1->parameters().get<OutgoingSSNResetRequestParameter>());

  // Simulate that the peer responded "In Progress".
  Parameters::Builder builder;
  builder.Add(ReconfigurationResponseParameter(req1.request_sequence_number(),
                                               ResponseResult::kInProgress));
  ReConfigChunk response_reconfig(builder.Build());

  EXPECT_CALL(producer_, CommitResetStreams()).Times(0);
  EXPECT_CALL(producer_, RollbackResetStreams()).Times(0);

  // Processing a response shouldn't result in sending anything.
  EXPECT_CALL(callbacks_, OnError).Times(0);
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
  handler_->HandleReConfig(std::move(response_reconfig));

  // Let some time pass, so that the reconfig timer expires, and retries the
  // same request.
  EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(1);
  AdvanceTime(kRto);

  std::vector<uint8_t> payload = callbacks_.ConsumeSentPacket();
  ASSERT_FALSE(payload.empty());

  ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet,
                              SctpPacket::Parse(payload, DcSctpOptions()));
  ASSERT_THAT(packet.descriptors(), SizeIs(1));
  ASSERT_HAS_VALUE_AND_ASSIGN(
      ReConfigChunk reconfig2,
      ReConfigChunk::Parse(packet.descriptors()[0].data));

  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req2,
      reconfig2.parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req2.request_sequence_number(),
            AddTo(req1.request_sequence_number(), 1));
  EXPECT_THAT(req2.stream_ids(), UnorderedElementsAre(kStreamToReset));
}

TEST_F(StreamResetHandlerTest, ResetWhileRequestIsSentWillQueue) {
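  // Streams that are reset while an outgoing request is in flight are queued
  // and sent in a follow-up request once the first one has been responded to.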
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  std::optional<ReConfigChunk> reconfig1 = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig1.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req1,
      reconfig1->parameters().get<OutgoingSSNResetRequestParameter>());
  EXPECT_EQ(req1.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req1.sender_last_assigned_tsn(),
            AddTo(retransmission_queue_->next_tsn(), -1));
  EXPECT_THAT(req1.stream_ids(), UnorderedElementsAre(StreamID(42)));

  // Streams reset while the request is in-flight will be queued.
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(41)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
  StreamID stream_ids[] = {StreamID(41), StreamID(43)};
  handler_->ResetStreams(stream_ids);

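  // No new request can be made while the first one is still outstanding.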
|
2024-08-29 13:00:40 +00:00
|
|
|
EXPECT_EQ(handler_->MakeStreamResetRequest(), std::nullopt);
|
2021-04-06 20:47:11 +02:00
|
|
|
|
|
|
|
|
Parameters::Builder builder;
|
|
|
|
|
builder.Add(ReconfigurationResponseParameter(
|
|
|
|
|
req1.request_sequence_number(), ResponseResult::kSuccessPerformed));
|
|
|
|
|
ReConfigChunk response_reconfig(builder.Build());
|
|
|
|
|
|
|
|
|
|
EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
|
|
|
|
|
EXPECT_CALL(producer_, RollbackResetStreams()).Times(0);
|
|
|
|
|
|
|
|
|
|
// Processing a response shouldn't result in sending anything.
|
|
|
|
|
EXPECT_CALL(callbacks_, OnError).Times(0);
|
2021-08-12 15:21:25 +02:00
|
|
|
EXPECT_CALL(callbacks_, SendPacketWithStatus).Times(0);
|
2021-09-16 07:27:47 +02:00
|
|
|
handler_->HandleReConfig(std::move(response_reconfig));
|
2021-04-06 20:47:11 +02:00
|
|
|
|
|
|
|
|
// Response has been processed. A new request can be sent.
|
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(41), StreamID(43)})));

  std::optional<ReConfigChunk> reconfig2 = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig2.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req2,
      reconfig2->parameters().get<OutgoingSSNResetRequestParameter>());
  EXPECT_EQ(req2.request_sequence_number(), AddTo(kMyInitialReqSn, 1));
  EXPECT_EQ(req2.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req2.stream_ids(),
              UnorderedElementsAre(StreamID(41), StreamID(43)));
}
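
// A received Incoming SSN Reset Request is acknowledged with
// kSuccessNothingToDo.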
TEST_F(StreamResetHandlerTest, SendIncomingResetJustReturnsNothingPerformed) {
  Parameters::Builder builder;
  builder.Add(
      IncomingSSNResetRequestParameter(kPeerInitialReqSn, {StreamID(1)}));

  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  ASSERT_THAT(responses, SizeIs(1));
  EXPECT_THAT(responses[0].response_sequence_number(), kPeerInitialReqSn);
  EXPECT_THAT(responses[0].result(), ResponseResult::kSuccessNothingToDo);
}
TEST_F(StreamResetHandlerTest, SendSameRequestTwiceIsIdempotent) {
  // Simulate that receiving the same chunk twice (due to network issues,
  // or retransmissions, causing a RECONFIG to be re-received) is idempotent.
  for (int i = 0; i < 2; ++i) {
    Parameters::Builder builder;
    builder.Add(OutgoingSSNResetRequestParameter(
        kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1),
        {StreamID(1)}));

    std::vector<ReconfigurationResponseParameter> responses1 =
        HandleAndCatchResponse(ReConfigChunk(builder.Build()));
    EXPECT_THAT(responses1, SizeIs(1));
    EXPECT_EQ(responses1[0].result(), ResponseResult::kInProgress);
  }
}
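
// Walks the handler through the handover readiness states: streams prepared
// for reset make it kPendingStreamReset, an in-flight request makes it
// kPendingStreamResetRequest, and it only reports IsReady() once every
// request has been responded to and no streams remain to be reset.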
TEST_F(StreamResetHandlerTest,
       HandoverIsAllowedOnlyWhenNoStreamIsBeingOrWillBeReset) {
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_EQ(
      handler_->GetHandoverReadiness(),
      HandoverReadinessStatus(HandoverUnreadinessReason::kPendingStreamReset));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  ASSERT_TRUE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_EQ(handler_->GetHandoverReadiness(),
            HandoverReadinessStatus(
                HandoverUnreadinessReason::kPendingStreamResetRequest));

  // Reset more streams while the request is in-flight.
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(41)));
  EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
  StreamID stream_ids[] = {StreamID(41), StreamID(43)};
  handler_->ResetStreams(stream_ids);
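
  // Both the in-flight request and the newly prepared streams now block
  // handover.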
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_EQ(handler_->GetHandoverReadiness(),
            HandoverReadinessStatus()
                .Add(HandoverUnreadinessReason::kPendingStreamResetRequest)
                .Add(HandoverUnreadinessReason::kPendingStreamReset));

  // Processing a response to first request.
  EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
  handler_->HandleReConfig(
      ReConfigChunk(Parameters::Builder()
                        .Add(ReconfigurationResponseParameter(
                            kMyInitialReqSn, ResponseResult::kSuccessPerformed))
                        .Build()));
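
  // Streams 41 and 43 are still pending reset, so handover remains blocked.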
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_EQ(
      handler_->GetHandoverReadiness(),
      HandoverReadinessStatus(HandoverUnreadinessReason::kPendingStreamReset));

  // Second request can be sent.
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(41), StreamID(43)})));

  ASSERT_TRUE(handler_->MakeStreamResetRequest().has_value());
  EXPECT_EQ(handler_->GetHandoverReadiness(),
            HandoverReadinessStatus(
                HandoverUnreadinessReason::kPendingStreamResetRequest));

  // Processing a response to second request.
  EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
  handler_->HandleReConfig(ReConfigChunk(
      Parameters::Builder()
          .Add(ReconfigurationResponseParameter(
              AddTo(kMyInitialReqSn, 1), ResponseResult::kSuccessPerformed))
          .Build()));

  // Second response has been processed. No pending resets.
  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(false));

  EXPECT_TRUE(handler_->GetHandoverReadiness().IsReady());
}
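
// A handover performed before any stream has been reset: the first outgoing
// reset request afterwards still uses the initial request sequence number.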
TEST_F(StreamResetHandlerTest, HandoverInInitialState) {
  PerformHandover();

  EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
  handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

  EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
  EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
      .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

  std::optional<ReConfigChunk> reconfig = handler_->MakeStreamResetRequest();
  ASSERT_TRUE(reconfig.has_value());
  ASSERT_HAS_VALUE_AND_ASSIGN(
      OutgoingSSNResetRequestParameter req,
      reconfig->parameters().get<OutgoingSSNResetRequestParameter>());

  EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
  EXPECT_EQ(req.sender_last_assigned_tsn(),
            TSN(*retransmission_queue_->next_tsn() - 1));
  EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42)));
}
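
// After one stream has been fully reset (request sent and response handled),
// a handover should carry the request sequence counter over, so the next
// request uses kMyInitialReqSn + 1.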
TEST_F(StreamResetHandlerTest, HandoverAfterHavingResetOneStream) {
  // Reset one stream
  {
    EXPECT_CALL(producer_, PrepareResetStream(StreamID(42)));
    handler_->ResetStreams(std::vector<StreamID>({StreamID(42)}));

    EXPECT_CALL(producer_, HasStreamsReadyToBeReset())
        .WillOnce(Return(true))
        .WillOnce(Return(false));
    EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
        .WillOnce(Return(std::vector<StreamID>({StreamID(42)})));

    ASSERT_HAS_VALUE_AND_ASSIGN(ReConfigChunk reconfig,
                                handler_->MakeStreamResetRequest());
    ASSERT_HAS_VALUE_AND_ASSIGN(
        OutgoingSSNResetRequestParameter req,
        reconfig.parameters().get<OutgoingSSNResetRequestParameter>());
    EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn);
    EXPECT_EQ(req.sender_last_assigned_tsn(),
              TSN(*retransmission_queue_->next_tsn() - 1));
    EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42)));

    EXPECT_CALL(producer_, CommitResetStreams()).Times(1);
    handler_->HandleReConfig(
        ReConfigChunk(Parameters::Builder()
                          .Add(ReconfigurationResponseParameter(
                              req.request_sequence_number(),
                              ResponseResult::kSuccessPerformed))
                          .Build()));
  }

  PerformHandover();

  // Reset another stream after handover
  {
    EXPECT_CALL(producer_, PrepareResetStream(StreamID(43)));
    handler_->ResetStreams(std::vector<StreamID>({StreamID(43)}));

    EXPECT_CALL(producer_, HasStreamsReadyToBeReset()).WillOnce(Return(true));
    EXPECT_CALL(producer_, GetStreamsReadyToBeReset())
        .WillOnce(Return(std::vector<StreamID>({StreamID(43)})));

    ASSERT_HAS_VALUE_AND_ASSIGN(ReConfigChunk reconfig,
                                handler_->MakeStreamResetRequest());
    ASSERT_HAS_VALUE_AND_ASSIGN(
        OutgoingSSNResetRequestParameter req,
        reconfig.parameters().get<OutgoingSSNResetRequestParameter>());

    EXPECT_EQ(req.request_sequence_number(),
              ReconfigRequestSN(kMyInitialReqSn.value() + 1));
    EXPECT_EQ(req.sender_last_assigned_tsn(),
              TSN(*retransmission_queue_->next_tsn() - 1));
    EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(43)));
  }
}
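
// An incoming reset request referring to a TSN that hasn't been received yet
// is answered with kInProgress; a retry with an incremented request sequence
// number, after the TSN has arrived, is performed successfully.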
TEST_F(StreamResetHandlerTest, PerformCloseAfterOneFirstFailing) {
  // Inject a stream reset on the first expected TSN (which hasn't been seen).
  Parameters::Builder builder;
  builder.Add(OutgoingSSNResetRequestParameter(
      kPeerInitialReqSn, ReconfigRequestSN(3), kPeerInitialTsn, {StreamID(1)}));

  // The socket is expected to say "in progress" as that TSN hasn't been seen.
  std::vector<ReconfigurationResponseParameter> responses =
      HandleAndCatchResponse(ReConfigChunk(builder.Build()));
  EXPECT_THAT(responses, SizeIs(1));
  EXPECT_EQ(responses[0].result(), ResponseResult::kInProgress);

  // Let the socket receive the TSN.
  DataGeneratorOptions opts;
  opts.mid = MID(0);
  reasm_->Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE", opts));
  data_tracker_->Observe(kPeerInitialTsn);

  // And emulate that time has passed, and the peer retries the stream reset,
  // but now with an incremented request sequence number.
  Parameters::Builder builder2;
  builder2.Add(OutgoingSSNResetRequestParameter(
      ReconfigRequestSN(*kPeerInitialReqSn + 1), ReconfigRequestSN(3),
      kPeerInitialTsn, {StreamID(1)}));

  // This is supposed to be handled well.
  std::vector<ReconfigurationResponseParameter> responses2 =
      HandleAndCatchResponse(ReConfigChunk(builder2.Build()));
  EXPECT_THAT(responses2, SizeIs(1));
  EXPECT_EQ(responses2[0].result(), ResponseResult::kSuccessPerformed);
}

}  // namespace
}  // namespace dcsctp