2016-08-30 14:04:35 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* Use of this source code is governed by a BSD-style license
|
|
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
|
|
|
|
* in the file PATENTS. All contributing project authors may
|
|
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-01-11 09:11:00 -08:00
|
|
|
#ifndef PC_RTC_STATS_COLLECTOR_H_
|
|
|
|
|
#define PC_RTC_STATS_COLLECTOR_H_
|
2016-08-30 14:04:35 -07:00
|
|
|
|
2021-01-29 14:45:08 +00:00
|
|
|
#include <stdint.h>
|
2022-02-23 13:44:59 +00:00
|
|
|
|
|
|
|
|
#include <cstdint>
|
2016-10-24 04:00:05 -07:00
|
|
|
#include <map>
|
2016-08-30 14:04:35 -07:00
|
|
|
#include <memory>
|
2016-11-14 01:41:09 -08:00
|
|
|
#include <set>
|
2017-10-30 09:57:42 -07:00
|
|
|
#include <string>
|
2016-09-05 01:36:50 -07:00
|
|
|
#include <vector>
|
2016-08-30 14:04:35 -07:00
|
|
|
|
2018-06-19 16:47:43 +02:00
|
|
|
#include "absl/types/optional.h"
|
2021-01-29 14:45:08 +00:00
|
|
|
#include "api/data_channel_interface.h"
|
|
|
|
|
#include "api/media_types.h"
|
2019-01-25 20:26:48 +01:00
|
|
|
#include "api/scoped_refptr.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "api/stats/rtc_stats_collector_callback.h"
|
|
|
|
|
#include "api/stats/rtc_stats_report.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "api/stats/rtcstats_objects.h"
|
|
|
|
|
#include "call/call.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "media/base/media_channel.h"
|
2020-07-09 15:32:34 -07:00
|
|
|
#include "pc/data_channel_utils.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "pc/peer_connection_internal.h"
|
2021-01-29 14:45:08 +00:00
|
|
|
#include "pc/rtp_receiver.h"
|
|
|
|
|
#include "pc/rtp_sender.h"
|
|
|
|
|
#include "pc/rtp_transceiver.h"
|
|
|
|
|
#include "pc/sctp_data_channel.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "pc/track_media_info_map.h"
|
2021-01-29 14:45:08 +00:00
|
|
|
#include "pc/transport_stats.h"
|
|
|
|
|
#include "rtc_base/checks.h"
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only changes between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
#include "rtc_base/event.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "rtc_base/ref_count.h"
|
2021-01-29 14:45:08 +00:00
|
|
|
#include "rtc_base/ssl_certificate.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "rtc_base/ssl_identity.h"
|
2018-07-25 15:04:28 +02:00
|
|
|
#include "rtc_base/third_party/sigslot/sigslot.h"
|
2021-01-29 14:45:08 +00:00
|
|
|
#include "rtc_base/thread.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "rtc_base/time_utils.h"
|
2016-08-30 14:04:35 -07:00
|
|
|
|
|
|
|
|
namespace webrtc {
|
|
|
|
|
|
2018-03-19 13:52:56 +01:00
|
|
|
class RtpSenderInternal;
|
|
|
|
|
class RtpReceiverInternal;
|
|
|
|
|
|
2016-09-05 01:36:50 -07:00
|
|
|
// All public methods of the collector are to be called on the signaling thread.
|
|
|
|
|
// Stats are gathered on the signaling, worker and network threads
|
|
|
|
|
// asynchronously. The callback is invoked on the signaling thread. Resulting
|
2021-07-30 22:30:23 +02:00
|
|
|
// reports are cached for `cache_lifetime_` ms.
|
2021-04-22 18:16:35 +02:00
|
|
|
class RTCStatsCollector : public rtc::RefCountInterface,
|
2016-11-14 01:41:09 -08:00
|
|
|
public sigslot::has_slots<> {
|
2016-09-05 01:36:50 -07:00
|
|
|
public:
|
|
|
|
|
static rtc::scoped_refptr<RTCStatsCollector> Create(
|
2018-01-23 16:38:46 -08:00
|
|
|
PeerConnectionInternal* pc,
|
2016-08-31 07:57:36 -07:00
|
|
|
int64_t cache_lifetime_us = 50 * rtc::kNumMicrosecsPerMillisec);
|
2016-08-30 14:04:35 -07:00
|
|
|
|
|
|
|
|
// Gets a recent stats report. If there is a report cached that is still fresh
|
|
|
|
|
// it is returned, otherwise new stats are gathered and returned. A report is
|
2021-07-30 22:30:23 +02:00
|
|
|
// considered fresh for `cache_lifetime_` ms. const RTCStatsReports are safe
|
2016-08-30 14:04:35 -07:00
|
|
|
// to use across multiple threads and may be destructed on any thread.
|
2018-03-19 13:52:56 +01:00
|
|
|
// If the optional selector argument is used, stats are filtered according to
|
|
|
|
|
// stats selection algorithm before delivery.
|
|
|
|
|
// https://w3c.github.io/webrtc-pc/#dfn-stats-selection-algorithm
|
2016-09-05 01:36:50 -07:00
|
|
|
void GetStatsReport(rtc::scoped_refptr<RTCStatsCollectorCallback> callback);
|
2021-07-30 22:30:23 +02:00
|
|
|
// If `selector` is null the selection algorithm is still applied (interpreted
|
2018-03-19 13:52:56 +01:00
|
|
|
// as: no RTP streams are sent by selector). The result is empty.
|
|
|
|
|
void GetStatsReport(rtc::scoped_refptr<RtpSenderInternal> selector,
|
|
|
|
|
rtc::scoped_refptr<RTCStatsCollectorCallback> callback);
|
2021-07-30 22:30:23 +02:00
|
|
|
// If `selector` is null the selection algorithm is still applied (interpreted
|
2018-03-19 13:52:56 +01:00
|
|
|
// as: no RTP streams are received by selector). The result is empty.
|
|
|
|
|
void GetStatsReport(rtc::scoped_refptr<RtpReceiverInternal> selector,
|
|
|
|
|
rtc::scoped_refptr<RTCStatsCollectorCallback> callback);
|
2016-08-30 14:04:35 -07:00
|
|
|
// Clears the cache's reference to the most recent stats report. Subsequently
|
2021-07-30 22:30:23 +02:00
|
|
|
// calling `GetStatsReport` guarantees fresh stats.
|
2016-08-30 14:04:35 -07:00
|
|
|
void ClearCachedStatsReport();
|
|
|
|
|
|
2021-07-30 22:30:23 +02:00
|
|
|
// If there is a `GetStatsReport` request in-flight, waits until it has been
|
2016-12-19 05:06:57 -08:00
|
|
|
// completed. Must be called on the signaling thread.
|
|
|
|
|
void WaitForPendingRequest();
|
|
|
|
|
|
2016-09-05 01:36:50 -07:00
|
|
|
protected:
|
2018-01-23 16:38:46 -08:00
|
|
|
RTCStatsCollector(PeerConnectionInternal* pc, int64_t cache_lifetime_us);
|
2016-12-19 05:06:57 -08:00
|
|
|
~RTCStatsCollector();
|
2016-09-05 01:36:50 -07:00
|
|
|
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only changes between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// Local/remote pair of SSL certificate stats for one transport. Produced per
// transport name by PrepareTransportCertificateStats_n() and consumed by the
// *_n stats-producing functions (e.g. ProduceCertificateStats_n,
// ProduceTransportStats_n).
struct CertificateStatsPair {
  // Stats for this endpoint's certificate chain.
  std::unique_ptr<rtc::SSLCertificateStats> local;
  // Stats for the remote endpoint's certificate chain.
  // NOTE(review): either pointer is presumably null when the corresponding
  // certificate is unavailable — confirm against the .cc implementation.
  std::unique_ptr<rtc::SSLCertificateStats> remote;
};
|
2016-09-05 01:36:50 -07:00
|
|
|
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only changes between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// Stats gathering on a particular thread. Virtual for the sake of testing.
|
|
|
|
|
virtual void ProducePartialResultsOnSignalingThreadImpl(
|
|
|
|
|
int64_t timestamp_us,
|
|
|
|
|
RTCStatsReport* partial_report);
|
|
|
|
|
virtual void ProducePartialResultsOnNetworkThreadImpl(
|
|
|
|
|
int64_t timestamp_us,
|
|
|
|
|
const std::map<std::string, cricket::TransportStats>&
|
|
|
|
|
transport_stats_by_name,
|
|
|
|
|
const std::map<std::string, CertificateStatsPair>& transport_cert_stats,
|
|
|
|
|
RTCStatsReport* partial_report);
|
2016-09-05 01:36:50 -07:00
|
|
|
|
2016-08-30 14:04:35 -07:00
|
|
|
private:
|
2018-03-19 13:52:56 +01:00
|
|
|
// Describes one pending GetStatsReport() request: the callback to invoke on
// completion, and the filter mode selecting whether the full report or only
// the stats for a particular sender/receiver (per the stats selection
// algorithm) should be delivered.
class RequestInfo {
 public:
  // kAll delivers the unfiltered report; the selector modes filter it by the
  // stored sender or receiver.
  enum class FilterMode { kAll, kSenderSelector, kReceiverSelector };

  // Constructs with FilterMode::kAll.
  explicit RequestInfo(
      rtc::scoped_refptr<RTCStatsCollectorCallback> callback);
  // Constructs with FilterMode::kSenderSelector. The selection algorithm is
  // applied even if `selector` is null, resulting in an empty report.
  RequestInfo(rtc::scoped_refptr<RtpSenderInternal> selector,
              rtc::scoped_refptr<RTCStatsCollectorCallback> callback);
  // Constructs with FilterMode::kReceiverSelector. The selection algorithm is
  // applied even if `selector` is null, resulting in an empty report.
  RequestInfo(rtc::scoped_refptr<RtpReceiverInternal> selector,
              rtc::scoped_refptr<RTCStatsCollectorCallback> callback);

  FilterMode filter_mode() const { return filter_mode_; }
  rtc::scoped_refptr<RTCStatsCollectorCallback> callback() const {
    return callback_;
  }
  // Only valid when filter_mode() is kSenderSelector (DCHECKed). May still be
  // null — see the constructor comment above.
  rtc::scoped_refptr<RtpSenderInternal> sender_selector() const {
    RTC_DCHECK(filter_mode_ == FilterMode::kSenderSelector);
    return sender_selector_;
  }
  // Only valid when filter_mode() is kReceiverSelector (DCHECKed). May still
  // be null — see the constructor comment above.
  rtc::scoped_refptr<RtpReceiverInternal> receiver_selector() const {
    RTC_DCHECK(filter_mode_ == FilterMode::kReceiverSelector);
    return receiver_selector_;
  }

 private:
  // Common constructor taking all members; presumably delegated to by the
  // public constructors (defined out-of-line — confirm in the .cc file).
  RequestInfo(FilterMode filter_mode,
              rtc::scoped_refptr<RTCStatsCollectorCallback> callback,
              rtc::scoped_refptr<RtpSenderInternal> sender_selector,
              rtc::scoped_refptr<RtpReceiverInternal> receiver_selector);

  FilterMode filter_mode_;
  rtc::scoped_refptr<RTCStatsCollectorCallback> callback_;
  // At most one of the two selectors is meaningful, as selected by
  // filter_mode_.
  rtc::scoped_refptr<RtpSenderInternal> sender_selector_;
  rtc::scoped_refptr<RtpReceiverInternal> receiver_selector_;
};
|
|
|
|
|
|
|
|
|
|
void GetStatsReportInternal(RequestInfo request);
|
|
|
|
|
|
2018-02-15 15:19:50 -08:00
|
|
|
// Structure for tracking stats about each RtpTransceiver managed by the
// PeerConnection. This can either be a Plan B style or Unified Plan style
// transceiver (i.e., can have 0 or many senders and receivers).
// Some fields are copied from the RtpTransceiver/BaseChannel object so that
// they can be accessed safely on threads other than the signaling thread.
// If a BaseChannel is not available (e.g., if signaling has not started),
// then `mid` and `transport_name` will be null.
struct RtpTransceiverStatsInfo {
  rtc::scoped_refptr<RtpTransceiver> transceiver;
  cricket::MediaType media_type;
  // Media section identifier; absent until a BaseChannel exists (see above).
  absl::optional<std::string> mid;
  // Name of the transport carrying this transceiver's media; absent until a
  // BaseChannel exists (see above).
  absl::optional<std::string> transport_name;
  // Snapshot mapping tracks to media info, copied so it can be read off the
  // signaling thread.
  TrackMediaInfoMap track_media_info_map;
};
|
|
|
|
|
|
2018-03-08 09:53:47 -08:00
|
|
|
void DeliverCachedReport(
|
|
|
|
|
rtc::scoped_refptr<const RTCStatsReport> cached_report,
|
2018-03-19 13:52:56 +01:00
|
|
|
std::vector<RequestInfo> requests);
|
2016-08-30 14:04:35 -07:00
|
|
|
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCCertificateStats`.
|
2016-12-19 04:58:02 -08:00
|
|
|
void ProduceCertificateStats_n(
|
2016-10-24 04:00:05 -07:00
|
|
|
int64_t timestamp_us,
|
|
|
|
|
const std::map<std::string, CertificateStatsPair>& transport_cert_stats,
|
2016-10-03 14:16:56 -07:00
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCCodecStats`.
|
2016-12-19 04:58:02 -08:00
|
|
|
void ProduceCodecStats_n(
|
2018-02-15 15:19:50 -08:00
|
|
|
int64_t timestamp_us,
|
|
|
|
|
const std::vector<RtpTransceiverStatsInfo>& transceiver_stats_infos,
|
2016-11-23 02:32:06 -08:00
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCDataChannelStats`.
|
2016-10-18 12:48:31 -07:00
|
|
|
void ProduceDataChannelStats_s(int64_t timestamp_us,
|
|
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCIceCandidatePairStats` and `RTCIceCandidateStats`.
|
2016-12-19 04:58:02 -08:00
|
|
|
void ProduceIceCandidateAndPairStats_n(
|
2017-06-02 06:44:03 -07:00
|
|
|
int64_t timestamp_us,
|
2018-02-06 10:34:40 -08:00
|
|
|
const std::map<std::string, cricket::TransportStats>&
|
|
|
|
|
transport_stats_by_name,
|
2017-06-02 06:44:03 -07:00
|
|
|
const Call::Stats& call_stats,
|
2016-10-07 02:18:47 -07:00
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCMediaStreamStats`.
|
2018-02-15 15:19:50 -08:00
|
|
|
void ProduceMediaStreamStats_s(int64_t timestamp_us,
|
|
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCMediaStreamTrackStats`.
|
2018-02-15 15:19:50 -08:00
|
|
|
void ProduceMediaStreamTrackStats_s(int64_t timestamp_us,
|
|
|
|
|
RTCStatsReport* report) const;
|
2019-05-22 15:49:42 +02:00
|
|
|
// Produces RTCMediaSourceStats, including RTCAudioSourceStats and
|
|
|
|
|
// RTCVideoSourceStats.
|
|
|
|
|
void ProduceMediaSourceStats_s(int64_t timestamp_us,
|
|
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCPeerConnectionStats`.
|
2016-10-03 14:16:56 -07:00
|
|
|
void ProducePeerConnectionStats_s(int64_t timestamp_us,
|
|
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCInboundRTPStreamStats` and `RTCOutboundRTPStreamStats`.
|
Implement RTCRemoteInboundRtpStreamStats for both audio and video.
This implements the essentials of RTCRemoteInboundRtpStreamStats. This
includes:
- ssrc
- transportId
- codecId
- packetsLost
- jitter
- localId
- roundTripTime
https://w3c.github.io/webrtc-stats/#remoteinboundrtpstats-dict*
The following members are not implemented because they require more
work...
- From RTCReceivedRtpStreamStats: packetsReceived, packetsDiscarded,
packetsRepaired, burstPacketsLost, burstPacketsDiscarded,
burstLossCount, burstDiscardCount, burstLossRate, burstDiscardRate,
gapLossRate and gapDiscardRate.
- From RTCRemoteInboundRtpStreamStats: fractionLost.
Bug: webrtc:10455, webrtc:10456
Change-Id: If2ab0da7105d8c93bba58e14aa93bd22ffe57f1d
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/138067
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28073}
2019-05-27 13:40:25 +02:00
|
|
|
// This has to be invoked after codecs and transport stats have been created
|
|
|
|
|
// because some metrics are calculated through lookup of other metrics.
|
2018-02-06 10:34:40 -08:00
|
|
|
void ProduceRTPStreamStats_n(
|
|
|
|
|
int64_t timestamp_us,
|
2018-02-15 15:19:50 -08:00
|
|
|
const std::vector<RtpTransceiverStatsInfo>& transceiver_stats_infos,
|
2018-02-06 10:34:40 -08:00
|
|
|
RTCStatsReport* report) const;
|
2018-02-15 15:19:50 -08:00
|
|
|
void ProduceAudioRTPStreamStats_n(int64_t timestamp_us,
|
|
|
|
|
const RtpTransceiverStatsInfo& stats,
|
|
|
|
|
RTCStatsReport* report) const;
|
|
|
|
|
void ProduceVideoRTPStreamStats_n(int64_t timestamp_us,
|
|
|
|
|
const RtpTransceiverStatsInfo& stats,
|
|
|
|
|
RTCStatsReport* report) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// Produces `RTCTransportStats`.
|
2016-12-19 04:58:02 -08:00
|
|
|
void ProduceTransportStats_n(
|
2018-02-06 10:34:40 -08:00
|
|
|
int64_t timestamp_us,
|
|
|
|
|
const std::map<std::string, cricket::TransportStats>&
|
|
|
|
|
transport_stats_by_name,
|
2016-10-24 04:00:05 -07:00
|
|
|
const std::map<std::string, CertificateStatsPair>& transport_cert_stats,
|
|
|
|
|
RTCStatsReport* report) const;
|
|
|
|
|
|
|
|
|
|
// Helper function to stats-producing functions.
|
|
|
|
|
std::map<std::string, CertificateStatsPair>
|
2018-02-06 10:34:40 -08:00
|
|
|
PrepareTransportCertificateStats_n(
|
|
|
|
|
const std::map<std::string, cricket::TransportStats>&
|
|
|
|
|
transport_stats_by_name) const;
|
2021-07-30 22:30:23 +02:00
|
|
|
// The results are stored in `transceiver_stats_infos_` and `call_stats_`.
|
2021-04-18 11:55:57 +02:00
|
|
|
void PrepareTransceiverStatsInfosAndCallStats_s_w_n();
|
2016-08-30 14:04:35 -07:00
|
|
|
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only changes between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// Stats gathering on a particular thread.
|
|
|
|
|
void ProducePartialResultsOnSignalingThread(int64_t timestamp_us);
|
2021-04-18 11:55:57 +02:00
|
|
|
void ProducePartialResultsOnNetworkThread(
|
|
|
|
|
int64_t timestamp_us,
|
|
|
|
|
absl::optional<std::string> sctp_transport_name);
|
2021-07-30 22:30:23 +02:00
|
|
|
// Merges `network_report_` into `partial_report_` and completes the request.
|
|
|
|
|
// This is a NO-OP if `network_report_` is null.
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
void MergeNetworkReport_s();
|
|
|
|
|
|
2022-02-08 07:41:25 +00:00
|
|
|
// Slots for signals (sigslot) that are wired up to `pc_`.
|
|
|
|
|
void OnSctpDataChannelCreated(SctpDataChannel* channel);
|
2021-07-30 22:30:23 +02:00
|
|
|
// Slots for signals (sigslot) that are wired up to `channel`.
|
2020-07-09 15:32:34 -07:00
|
|
|
void OnDataChannelOpened(DataChannelInterface* channel);
|
|
|
|
|
void OnDataChannelClosed(DataChannelInterface* channel);
|
2016-11-14 01:41:09 -08:00
|
|
|
|
2018-01-23 16:38:46 -08:00
|
|
|
PeerConnectionInternal* const pc_;
|
2016-09-05 01:36:50 -07:00
|
|
|
rtc::Thread* const signaling_thread_;
|
|
|
|
|
rtc::Thread* const worker_thread_;
|
|
|
|
|
rtc::Thread* const network_thread_;
|
|
|
|
|
|
|
|
|
|
int num_pending_partial_reports_;
|
|
|
|
|
int64_t partial_report_timestamp_us_;
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// Reports that are produced on the signaling thread or the network thread are
|
|
|
|
|
// merged into this report. It is only touched on the signaling thread. Once
|
|
|
|
|
// all partial reports are merged this is the result of a request.
|
2016-09-05 01:36:50 -07:00
|
|
|
rtc::scoped_refptr<RTCStatsReport> partial_report_;
|
2018-03-19 13:52:56 +01:00
|
|
|
std::vector<RequestInfo> requests_;
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// Holds the result of ProducePartialResultsOnNetworkThread(). It is merged
|
2021-07-30 22:30:23 +02:00
|
|
|
// into `partial_report_` on the signaling thread and then nulled by
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// MergeNetworkReport_s(). Thread-safety is ensured by using
|
2021-07-30 22:30:23 +02:00
|
|
|
// `network_report_event_`.
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
rtc::scoped_refptr<RTCStatsReport> network_report_;
|
2021-07-30 22:30:23 +02:00
|
|
|
// If set, it is safe to touch the `network_report_` on the signaling thread.
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
// This is reset before async-invoking ProducePartialResultsOnNetworkThread()
|
|
|
|
|
// and set when ProducePartialResultsOnNetworkThread() is complete, after it
|
2021-07-30 22:30:23 +02:00
|
|
|
// has updated the value of `network_report_`.
|
Reland "Fix getStats() freeze bug affecting Chromium but not WebRTC standalone."
This is a reland of 05d43c6f7fe497fed0f2c8714e2042dd07a86df2
The original CL got reverted because Chrome did not support IsQuitting() which
triggered a NOTREACHED() inside of a DCHECK. With
https://chromium-review.googlesource.com/c/chromium/src/+/1491620
it is safe to reland this CL.
The only change between this and the original patch set is that this is now
rebased on top of https://webrtc-review.googlesource.com/c/src/+/124701, i.e.
rtc::PostMessageWithFunctor() has been replaced by rtc::Thread::PostTask().
Original change's description:
> Fix getStats() freeze bug affecting Chromium but not WebRTC standalone.
>
> PeerConnection::Close() is, per-spec, a blocking operation.
> Unfortunately, PeerConnection is implemented to own resources used by
> the network thread, and Close() - on the signaling thread - destroys
> these resources. As such, tasks run in parallel like getStats() get into
> race conditions with Close() unless synchronized. The mechanism in-place
> is RTCStatsCollector::WaitForPendingRequest(), it waits until the
> network thread is done with the in-parallel stats request.
>
> Prior to this CL, this was implemented by performing
> rtc::Thread::ProcessMessages() in a loop until the network thread had
> posted a task on the signaling thread to say that it was done which
> would then get processed by ProcessMessages(). In WebRTC this works, and
> the test is RTCStatsIntegrationTest.GetsStatsWhileClosingPeerConnection.
>
> But because Chromium's thread wrapper does not support
> ProcessMessages(), calling getStats() followed by close() in Chrome
> resulted in waiting forever (https://crbug.com/850907).
>
> In this CL, the process messages loop is removed. Instead, the shared
> resources are guarded by an rtc::Event. WaitForPendingRequest() still
> blocks the signaling thread, but only while shared resources are in use
> by the network thread. After this CL, calling WaitForPendingRequest() no
> longer has any unexpected side-effects since it no longer processes
> other messages that might have been posted on the thread.
>
> The resource ownership and threading model of WebRTC deserves to be
> revisited, but this fixes a common Chromium crash without redesigning
> PeerConnection, in a way that does not cause more blocking than what
> the other PeerConnection methods are already doing.
>
> Note: An alternative to using rtc::Event is to use resource locks and
> to not perform the stats collection on the network thread if the
> request was cancelled before the start of processing, but this has very
> little benefit in terms of performance: once the network thread starts
> collecting the stats, it would use the lock until collection is
> completed, blocking the signaling thread trying to acquire that lock
> anyway. This defeats the purpose and is a riskier change, since
> cancelling partial collection in this inherently racy edge-case would
> have observable differences from the returned stats, which may cause
> more regressions.
>
> Bug: chromium:850907
> Change-Id: Idceeee0bddc0c9d5518b58a2b263abb2bbf47cff
> Reviewed-on: https://webrtc-review.googlesource.com/c/121567
> Commit-Queue: Henrik Boström <hbos@webrtc.org>
> Reviewed-by: Steve Anton <steveanton@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#26707}
TBR=steveanton@webrtc.org
Bug: chromium:850907
Change-Id: I5be7f69f0de65ff1120e4926fbf904def97ea9c0
Reviewed-on: https://webrtc-review.googlesource.com/c/124781
Reviewed-by: Henrik Boström <hbos@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26896}
2019-02-28 09:49:31 +01:00
|
|
|
rtc::Event network_report_event_;
|
2016-09-05 01:36:50 -07:00
|
|
|
|
2021-04-18 11:55:57 +02:00
|
|
|
// Cleared and set in `PrepareTransceiverStatsInfosAndCallStats_s_w_n`,
|
|
|
|
|
// starting out on the signaling thread, then network. Later read on the
|
|
|
|
|
// network and signaling threads as part of collecting stats and finally
|
|
|
|
|
// reset when the work is done. Initially this variable was added and not
|
|
|
|
|
// passed around as an arguments to avoid copies. This is thread safe due to
|
|
|
|
|
// how operations are sequenced and we don't start the stats collection
|
|
|
|
|
// sequence if one is in progress. As a future improvement though, we could
|
|
|
|
|
// now get rid of the variable and keep the data scoped within a stats
|
|
|
|
|
// collection sequence.
|
2018-02-15 15:19:50 -08:00
|
|
|
std::vector<RtpTransceiverStatsInfo> transceiver_stats_infos_;
|
2018-02-06 10:34:40 -08:00
|
|
|
|
2017-06-02 06:44:03 -07:00
|
|
|
Call::Stats call_stats_;
|
2016-12-19 04:58:02 -08:00
|
|
|
|
2016-08-31 07:57:36 -07:00
|
|
|
// A timestamp, in microseconds, that is based on a timer that is
|
|
|
|
|
// monotonically increasing. That is, even if the system clock is modified the
|
|
|
|
|
// difference between the timer and this timestamp is how fresh the cached
|
|
|
|
|
// report is.
|
|
|
|
|
int64_t cache_timestamp_us_;
|
|
|
|
|
int64_t cache_lifetime_us_;
|
2016-08-30 14:04:35 -07:00
|
|
|
rtc::scoped_refptr<const RTCStatsReport> cached_report_;
|
2016-11-14 01:41:09 -08:00
|
|
|
|
|
|
|
|
// Data recorded and maintained by the stats collector during its lifetime.
|
|
|
|
|
// Some stats are produced from this record instead of other components.
|
|
|
|
|
struct InternalRecord {
|
|
|
|
|
InternalRecord() : data_channels_opened(0), data_channels_closed(0) {}
|
|
|
|
|
|
|
|
|
|
// The opened count goes up when a channel is fully opened and the closed
|
|
|
|
|
// count goes up if a previously opened channel has fully closed. The opened
|
|
|
|
|
// count does not go down when a channel closes, meaning (opened - closed)
|
|
|
|
|
// is the number of channels currently opened. A channel that is closed
|
|
|
|
|
// before reaching the open state does not affect these counters.
|
|
|
|
|
uint32_t data_channels_opened;
|
|
|
|
|
uint32_t data_channels_closed;
|
|
|
|
|
// Identifies by address channels that have been opened, which remain in the
|
|
|
|
|
// set until they have been fully closed.
|
|
|
|
|
std::set<uintptr_t> opened_data_channels;
|
|
|
|
|
};
|
|
|
|
|
InternalRecord internal_record_;
|
2016-08-30 14:04:35 -07:00
|
|
|
};
|
|
|
|
|
|
2016-10-18 12:48:31 -07:00
|
|
|
const char* CandidateTypeToRTCIceCandidateTypeForTesting(
|
|
|
|
|
const std::string& type);
|
|
|
|
|
const char* DataStateToRTCDataChannelStateForTesting(
|
|
|
|
|
DataChannelInterface::DataState state);
|
2016-10-07 02:18:47 -07:00
|
|
|
|
2016-08-30 14:04:35 -07:00
|
|
|
} // namespace webrtc
|
|
|
|
|
|
2019-01-11 09:11:00 -08:00
|
|
|
#endif // PC_RTC_STATS_COLLECTOR_H_
|