diff options
author | Jorge E. Moreira <jemoreira@google.com> | 2024-02-28 11:12:35 -0800 |
---|---|---|
committer | Jorge Moreira <jemoreira@google.com> | 2024-02-29 17:45:27 +0000 |
commit | 1175aee9f762c11e2fbfe61ec92431a485f8d380 (patch) | |
tree | 630cf3fbbc48d0e16a7b87aca355a9137967e710 | |
parent | 3b73a4184ed87dc633954136c12a08dbb404ac26 (diff) | |
download | cuttlefish-1175aee9f762c11e2fbfe61ec92431a485f8d380.tar.gz |
Don't pass shared pointers to audio buffer around
Shared pointers can be copied and stored, and their usual semantics
imply the underlying data remains valid until the last shared pointer
is destroyed. This is not true for audio buffers, as those buffers are
released as soon as the function returns.
This changes those pointers into const references, which explicitly
indicate borrow semantics.
Bug: 327434508
Test: m webRTC
Change-Id: I9296390463002cbc0389b17777560ddfa9b98c9e
4 files changed, 10 insertions, 10 deletions
diff --git a/host/frontend/webrtc/audio_handler.cpp b/host/frontend/webrtc/audio_handler.cpp index 52a2294d7..be15ed7c2 100644 --- a/host/frontend/webrtc/audio_handler.cpp +++ b/host/frontend/webrtc/audio_handler.cpp @@ -443,7 +443,7 @@ void AudioHandler::OnPlaybackBuffer(TxBuffer buffer) { // webrtc api doesn't expect volatile memory. This should be safe though // because webrtc will use the contents of the buffer before returning // and only then we release it. - auto audio_frame_buffer = std::make_shared<CvdAudioFrameBuffer>( + CvdAudioFrameBuffer audio_frame_buffer( const_cast<const uint8_t*>(&buffer.get()[pos]), stream_desc.bits_per_sample, stream_desc.sample_rate, stream_desc.channels, frames); @@ -453,9 +453,9 @@ void AudioHandler::OnPlaybackBuffer(TxBuffer buffer) { pos += holding_buffer.Add(buffer.get() + pos, buffer.len() - pos); if (holding_buffer.full()) { auto buffer_ptr = const_cast<const uint8_t*>(holding_buffer.data()); - auto audio_frame_buffer = std::make_shared<CvdAudioFrameBuffer>( - buffer_ptr, stream_desc.bits_per_sample, - stream_desc.sample_rate, stream_desc.channels, frames); + CvdAudioFrameBuffer audio_frame_buffer( + buffer_ptr, stream_desc.bits_per_sample, stream_desc.sample_rate, + stream_desc.channels, frames); audio_sink_->OnFrame(audio_frame_buffer, base_time); holding_buffer.count = 0; } diff --git a/host/frontend/webrtc/libdevice/audio_sink.h b/host/frontend/webrtc/libdevice/audio_sink.h index 1baa88115..614aada91 100644 --- a/host/frontend/webrtc/libdevice/audio_sink.h +++ b/host/frontend/webrtc/libdevice/audio_sink.h @@ -26,7 +26,7 @@ namespace webrtc_streaming { class AudioSink { public: virtual ~AudioSink() = default; - virtual void OnFrame(std::shared_ptr<AudioFrameBuffer> frame, + virtual void OnFrame(const AudioFrameBuffer& frame, int64_t timestamp_us) = 0; }; diff --git a/host/frontend/webrtc/libdevice/audio_track_source_impl.cpp b/host/frontend/webrtc/libdevice/audio_track_source_impl.cpp index 72756e195..c10e2630b 100644 
--- a/host/frontend/webrtc/libdevice/audio_track_source_impl.cpp +++ b/host/frontend/webrtc/libdevice/audio_track_source_impl.cpp @@ -51,12 +51,12 @@ const cricket::AudioOptions AudioTrackSourceImpl::options() const { return cricket::AudioOptions(); } -void AudioTrackSourceImpl::OnFrame(std::shared_ptr<AudioFrameBuffer> frame, +void AudioTrackSourceImpl::OnFrame(const AudioFrameBuffer& frame, int64_t timestamp_ms) { std::lock_guard<std::mutex> lock(sinks_mutex_); for (auto sink : sinks_) { - sink->OnData(frame->data(), frame->bits_per_sample(), - frame->sample_rate(), frame->channels(), frame->frames(), + sink->OnData(frame.data(), frame.bits_per_sample(), + frame.sample_rate(), frame.channels(), frame.frames(), timestamp_ms); } } diff --git a/host/frontend/webrtc/libdevice/audio_track_source_impl.h b/host/frontend/webrtc/libdevice/audio_track_source_impl.h index 53dffeacf..61f01cff4 100644 --- a/host/frontend/webrtc/libdevice/audio_track_source_impl.h +++ b/host/frontend/webrtc/libdevice/audio_track_source_impl.h @@ -44,7 +44,7 @@ class AudioTrackSourceImpl : public webrtc::AudioSourceInterface { // audio network adaptation on the source is the wrong layer of abstraction). virtual const cricket::AudioOptions options() const; - void OnFrame(std::shared_ptr<AudioFrameBuffer> frame, int64_t timestamp_ms); + void OnFrame(const AudioFrameBuffer& frame, int64_t timestamp_ms); // MediaSourceInterface implementation SourceState state() const override; @@ -74,7 +74,7 @@ class AudioTrackSourceImplSinkWrapper : public AudioSink { AudioTrackSourceImplSinkWrapper(rtc::scoped_refptr<AudioTrackSourceImpl> obj) : track_source_impl_(obj) {} - void OnFrame(std::shared_ptr<AudioFrameBuffer> frame, + void OnFrame(const AudioFrameBuffer& frame, int64_t timestamp_ms) override { track_source_impl_->OnFrame(frame, timestamp_ms); } |