dcsctp: Reset send queue when recreating TCB

This is an issue found by the fuzzer, and it doesn't really happen in WebRTC,
which never closes the connection and then reconnects. The issue is that the
send queue outlives any connection, since messages may be sent (well, enqueued)
before the association is fully connected. The send queue is therefore always
present, while the TCB (the state of the connection) is torn down, for example
when the connection is closed. The TCB holds the Stream Reset handler, which is
responsible for e.g. keeping track of stream reset sequence numbers — state
that is tied to the connection. To ensure that the Stream Reset handler stays
in charge of deciding whether a stream reset is taking place, make sure that
the send queue is in a known good state whenever the Stream Reset handler is
created.

Bug: webrtc:13994, chromium:1320194
Change-Id: I940e4690ac9237ac99dd69a9ffc060cdac61711d
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/261260
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Victor Boivie <boivie@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#36779}
commit 3180a5ad06
parent 35ba0c5cd5
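To make the lifetime relationship described above easier to see, here is a
minimal standalone sketch. It is not the real dcsctp code: SendQueue, Tcb and
Socket below are simplified stand-ins for the actual WebRTC classes, and the
"partly sent" flag only models the state that the real queue resets. The point
it illustrates is that the queue is owned by the socket and survives
connect/close cycles, while the connection state (the TCB) is rebuilt for every
connection, so resetting the queue in that constructor always puts it back into
a known good state.

// Minimal lifetime sketch, not the real dcsctp implementation. It only
// illustrates why resetting the queue in the TCB constructor is sufficient:
// every path that creates connection state goes through that constructor.
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

// Stand-in for dcsctp's send queue: owned by the socket, so it outlives any
// single connection and may already hold messages before connecting.
class SendQueue {
 public:
  void Add(std::string message) { messages_.push_back(std::move(message)); }
  // Clears per-connection sequencing state; a partly sent message would be
  // re-sent in full after this (simplified to clearing a flag here).
  void Reset() { partly_sent_ = false; }
  void MarkPartlySent() { partly_sent_ = true; }
  bool partly_sent() const { return partly_sent_; }

 private:
  std::vector<std::string> messages_;
  bool partly_sent_ = false;
};

// Stand-in for the TransmissionControlBlock: one instance per established
// connection. Resetting the queue here guarantees a known-good baseline for
// the stream-reset bookkeeping that lives alongside it.
class Tcb {
 public:
  explicit Tcb(SendQueue& send_queue) { send_queue.Reset(); }
};

class Socket {
 public:
  void Send(std::string message) { send_queue_.Add(std::move(message)); }
  void Connect() { tcb_ = std::make_unique<Tcb>(send_queue_); }
  void Close() { tcb_ = nullptr; }  // The queue deliberately survives.
  SendQueue& queue() { return send_queue_; }

 private:
  SendQueue send_queue_;       // Outlives every connection.
  std::unique_ptr<Tcb> tcb_;   // Torn down on close, rebuilt on connect.
};

int main() {
  Socket socket;
  socket.Send("queued before the association is established");
  socket.Connect();
  socket.queue().MarkPartlySent();  // Pretend a message was partly sent.
  socket.Close();
  socket.Connect();                 // Recreating the TCB resets the queue.
  std::printf("partly_sent after reconnect: %d\n",
              socket.queue().partly_sent());
  return 0;
}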
@@ -1305,9 +1305,6 @@ bool DcSctpSocket::HandleCookieEchoWithTCB(const CommonHeader& header,
     RTC_DLOG(LS_VERBOSE) << log_prefix()
                          << "Received COOKIE-ECHO indicating a restarted peer";
 
-    // If a message was partly sent, and the peer restarted, resend it in
-    // full by resetting the send queue.
-    send_queue_.Reset();
     tcb_ = nullptr;
     callbacks_.OnConnectionRestarted();
   } else if (header.verification_tag == tcb_->my_verification_tag() &&
@@ -2341,6 +2341,32 @@ TEST(DcSctpSocketTest, CloseStreamsWithPendingRequest) {
   absl::optional<DcSctpMessage> msg6 = z.cb.ConsumeReceivedMessage();
   ASSERT_TRUE(msg6.has_value());
   EXPECT_EQ(msg6->stream_id(), StreamID(3));
 }
 
+TEST(DcSctpSocketTest, ReconnectSocketWithPendingStreamReset) {
+  // This is an issue found by fuzzing, and doesn't really make sense in WebRTC
+  // data channels as a SCTP connection is never ever closed and then
+  // reconnected. SCTP connections are closed when the peer connection is
+  // deleted, and then it doesn't do more with SCTP.
+  SocketUnderTest a("A");
+  SocketUnderTest z("Z");
+
+  ConnectSockets(a, z);
+
+  a.socket.ResetStreams(std::vector<StreamID>({StreamID(1)}));
+
+  // EXPECT_CALL(a.cb, OnAborted).Times(1);
+  EXPECT_CALL(z.cb, OnAborted).Times(1);
+  a.socket.Close();
+
+  EXPECT_EQ(a.socket.state(), SocketState::kClosed);
+
+  EXPECT_CALL(a.cb, OnConnected).Times(1);
+  EXPECT_CALL(z.cb, OnConnected).Times(1);
+  a.socket.Connect();
+  ExchangeMessages(a, z);
+  a.socket.ResetStreams(std::vector<StreamID>({StreamID(2)}));
+}
+
 }  // namespace
 }  // namespace dcsctp
@@ -118,7 +118,14 @@ class TransmissionControlBlock : public Context {
                               &reassembly_queue_,
                               &retransmission_queue_,
                               handover_state),
-        heartbeat_handler_(log_prefix, options, this, &timer_manager_) {}
+        heartbeat_handler_(log_prefix, options, this, &timer_manager_) {
+    // If the connection is re-established (peer restarted, but re-used old
+    // connection), make sure that all message identifiers are reset and any
+    // partly sent message is re-sent in full. The same is true when the socket
+    // is closed and later re-opened, which never happens in WebRTC, but is a
+    // valid operation on the SCTP level.
+    send_queue.Reset();
+  }
 
   // Implementation of `Context`.
   bool is_connection_established() const override {
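A note on the design choice, as far as it can be read from the commit message
and the diff: both paths that recreate connection state — the peer-restart
handling in HandleCookieEchoWithTCB and a Close() followed by a new Connect()
— now pass through the TransmissionControlBlock constructor, so the send queue
reset no longer has to be remembered at each call site; the TCB constructor
becomes the single place where the stream reset handler and the queue state are
brought back into agreement.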