// servo_media_audio/context.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::cell::Cell;
6use std::sync::mpsc::{self, Sender};
7use std::sync::{Arc, Mutex};
8use std::thread::Builder;
9
10use malloc_size_of_derive::MallocSizeOf;
11use servo_media_traits::{BackendMsg, ClientContextId, MediaInstance, MediaInstanceError};
12
13use crate::AudioBackend;
14use crate::decoder::{AudioDecoder, AudioDecoderCallbacks, AudioDecoderOptions};
15use crate::graph::{AudioGraph, InputPort, NodeId, OutputPort, PortId};
16use crate::node::{AudioNodeInit, AudioNodeMessage, ChannelInfo};
17use crate::render_thread::{AudioRenderThread, AudioRenderThreadMsg, SinkEosCallback};
18use crate::sink::AudioSinkError;
19
20/// Describes the state of the audio context on the control thread.
21#[derive(Clone, Copy, Debug, PartialEq, MallocSizeOf)]
22pub enum ProcessingState {
23    /// The audio context is suspended (context time is not proceeding,
24    /// audio hardware may be powered down/released).
25    Suspended,
26    /// Audio is being processed.
27    Running,
28    /// The audio context has been released, and can no longer be used
29    /// to process audio.
30    Closed,
31}
32
33pub type StateChangeResult = Option<()>;
34
/// Identify the type of playback, which affects tradeoffs between audio output
/// and power consumption.
// `Debug`/`PartialEq`/`Eq` are derived so the category can be logged and
// compared; the enum is fieldless, so total equality is sound.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LatencyCategory {
    /// Balance audio output latency and power consumption.
    Balanced,
    /// Provide the lowest audio output latency possible without glitching.
    Interactive,
    /// Prioritize sustained playback without interruption over audio output latency.
    /// Lowest power consumption.
    Playback,
}

/// User-specified options for a real time audio context.
// `Debug`/`PartialEq` make the options printable and comparable in tests;
// `Eq` is deliberately omitted because `sample_rate` is an `f32`.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct RealTimeAudioContextOptions {
    /// Number of samples that will play in one second, measured in Hz.
    pub sample_rate: f32,
    /// Type of playback.
    pub latency_hint: LatencyCategory,
}

impl Default for RealTimeAudioContextOptions {
    /// 44.1 kHz sample rate with `Interactive` (lowest glitch-free) latency.
    fn default() -> Self {
        Self {
            sample_rate: 44100.,
            latency_hint: LatencyCategory::Interactive,
        }
    }
}
65
/// User-specified options for an offline audio context.
// `Debug`/`PartialEq` make the options printable and comparable in tests;
// `Eq` is deliberately omitted because `sample_rate` is an `f32`.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct OfflineAudioContextOptions {
    /// The number of channels for this offline audio context.
    pub channels: u8,
    /// The length of the rendered audio buffer in sample-frames.
    pub length: usize,
    /// Number of samples that will be rendered in one second, measured in Hz.
    pub sample_rate: f32,
}

impl Default for OfflineAudioContextOptions {
    /// Mono, zero-length render buffer at 44.1 kHz.
    fn default() -> Self {
        Self {
            channels: 1,
            length: 0,
            sample_rate: 44100.,
        }
    }
}
86
87impl From<RealTimeAudioContextOptions> for AudioContextOptions {
88    fn from(options: RealTimeAudioContextOptions) -> Self {
89        AudioContextOptions::RealTimeAudioContext(options)
90    }
91}
92
93impl From<OfflineAudioContextOptions> for AudioContextOptions {
94    fn from(options: OfflineAudioContextOptions) -> Self {
95        AudioContextOptions::OfflineAudioContext(options)
96    }
97}
98
/// User-specified options for a real time or offline audio context.
#[derive(Copy, Clone)]
pub enum AudioContextOptions {
    /// Options for a context that renders audio in real time.
    RealTimeAudioContext(RealTimeAudioContextOptions),
    /// Options for an offline context that renders audio into a buffer of
    /// a fixed number of sample-frames.
    OfflineAudioContext(OfflineAudioContextOptions),
}
105
106impl Default for AudioContextOptions {
107    fn default() -> Self {
108        AudioContextOptions::RealTimeAudioContext(Default::default())
109    }
110}
111
/// Representation of an audio context on the control thread.
///
/// All audio processing happens on a dedicated render thread; this type
/// forwards commands to it over `sender` and tracks control-thread state.
#[derive(MallocSizeOf)]
pub struct AudioContext {
    /// Media instance ID.
    id: usize,
    /// Client context ID.
    client_context_id: ClientContextId,
    /// Owner backend communication channel.
    #[conditional_malloc_size_of]
    backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
    /// Rendering thread communication channel.
    sender: Sender<AudioRenderThreadMsg>,
    /// State of the audio context on the control thread.
    state: Cell<ProcessingState>,
    /// Number of samples that will be played in one second.
    sample_rate: f32,
    /// The identifier of an AudioDestinationNode with a single input
    /// representing the final destination for all audio.
    dest_node: NodeId,
    /// The identifier of this context's listener node, as provided by the
    /// audio graph (`AudioGraph::listener_id`).
    listener: NodeId,
    /// Factory producing backend-specific decoders; `decode_audio_data`
    /// calls it to create a fresh decoder for each request.
    #[ignore_malloc_size_of = "Fn"]
    make_decoder: Arc<dyn Fn() -> Box<dyn AudioDecoder> + Sync + Send>,
}
135
/// Opaque error produced by audio-context operations.
#[derive(Debug)]
pub struct AudioContextError;

impl std::fmt::Display for AudioContextError {
    /// Writes the fixed diagnostic name of this error type.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        formatter.write_str("AudioContextError")
    }
}

impl std::error::Error for AudioContextError {}
146
impl AudioContext {
    /// Constructs a new audio context.
    ///
    /// Spawns an `AudioRenderThread` for backend `B` and blocks until the
    /// thread reports whether its audio sink could be created; a sink
    /// failure is propagated as `Err(AudioSinkError)` and no context is
    /// returned. Real-time contexts always render with 2 channels; offline
    /// contexts use the channel count from their options. The new context
    /// starts in the `Suspended` state.
    ///
    /// # Panics
    /// Panics if the render thread cannot be spawned, or if it exits
    /// without sending its initialization result.
    pub fn new<B: AudioBackend>(
        id: usize,
        client_context_id: &ClientContextId,
        backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
        options: AudioContextOptions,
    ) -> Result<Self, AudioSinkError> {
        let (sample_rate, channels) = match options {
            AudioContextOptions::RealTimeAudioContext(ref options) => (options.sample_rate, 2),
            AudioContextOptions::OfflineAudioContext(ref options) => {
                (options.sample_rate, options.channels)
            },
        };

        let (sender, receiver) = mpsc::channel();
        let sender_ = sender.clone();
        let graph = AudioGraph::new(channels);
        let dest_node = graph.dest_id();
        let listener = graph.listener_id();

        // The render thread reports sink-creation success or failure over
        // this one-shot channel before entering its event loop.
        let (init_sender, init_receiver) = mpsc::channel();
        Builder::new()
            .name("AudioRenderThread".to_owned())
            .spawn(move || {
                AudioRenderThread::start::<B>(
                    receiver,
                    sender_,
                    sample_rate,
                    graph,
                    options,
                    init_sender,
                )
            })
            .expect("Failed to spawn AudioRenderThread");

        // Wait for initialization; `?` propagates an AudioSinkError if the
        // backend sink could not be created.
        init_receiver
            .recv()
            .expect("Failed to receive result from AudioRenderThread")?;
        Ok(Self {
            id,
            client_context_id: *client_context_id,
            backend_chan,
            sender,
            state: Cell::new(ProcessingState::Suspended),
            sample_rate,
            dest_node,
            listener,
            make_decoder: Arc::new(|| B::make_decoder()),
        })
    }

    /// Returns the control thread's current view of the processing state.
    pub fn state(&self) -> ProcessingState {
        self.state.get()
    }

    /// Returns the identifier of this context's destination node.
    pub fn dest_node(&self) -> NodeId {
        self.dest_node
    }

    /// Returns the identifier of this context's listener node.
    pub fn listener(&self) -> NodeId {
        self.listener
    }

    /// Synchronously queries the render thread for the current context time.
    ///
    /// # Panics
    /// Panics if the render thread is gone (the reply channel disconnects
    /// before a value is received).
    pub fn current_time(&self) -> f64 {
        let (tx, rx) = mpsc::channel();
        let _ = self.sender.send(AudioRenderThreadMsg::GetCurrentTime(tx));
        rx.recv().unwrap()
    }

    /// Asks the render thread to create a node of the given type, returning
    /// its identifier, or `None` if the render thread did not reply (e.g.
    /// it has shut down).
    pub fn create_node(&self, node_type: AudioNodeInit, ch: ChannelInfo) -> Option<NodeId> {
        let (tx, rx) = mpsc::channel();
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::CreateNode(node_type, tx, ch));
        rx.recv().ok()
    }

    // Resume audio processing (generated method; returns StateChangeResult).
    make_state_change!(resume, Running, Resume);

    // Suspend audio processing (generated method; returns StateChangeResult).
    make_state_change!(suspend, Suspended, Suspend);

    // Stop audio processing and close render thread (generated method).
    make_state_change!(close, Closed, Close);

    /// Forwards a message to the given node on the render thread.
    pub fn message_node(&self, id: NodeId, msg: AudioNodeMessage) {
        let _ = self.sender.send(AudioRenderThreadMsg::MessageNode(id, msg));
    }

    /// Connects a node's output port to another node's input port.
    pub fn connect_ports(&self, from: PortId<OutputPort>, to: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::ConnectPorts(from, to));
    }

    /// Disconnects all outgoing connections from the given node.
    pub fn disconnect_all_from(&self, node: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectAllFrom(node));
    }

    /// Disconnect all outgoing connections from a node's output
    ///
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output>
    pub fn disconnect_output(&self, out: PortId<OutputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutput(out));
    }

    /// Disconnect connections from a node to another node
    ///
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode>
    pub fn disconnect_between(&self, from: NodeId, to: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectBetween(from, to));
    }

    /// Disconnect connections from a node to another node's input
    ///
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationparam>
    pub fn disconnect_to(&self, from: NodeId, to: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectTo(from, to));
    }

    /// Disconnect all outgoing connections from a node's output to another node
    ///
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output>
    pub fn disconnect_output_between(&self, out: PortId<OutputPort>, to: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutputBetween(out, to));
    }

    /// Disconnect all outgoing connections from a node's output to another node's input
    ///
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input>
    pub fn disconnect_output_between_to(&self, out: PortId<OutputPort>, inp: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutputBetweenTo(out, inp));
    }

    /// Asynchronously decodes the audio file data contained in the given
    /// buffer.
    ///
    /// Decoding runs on a dedicated "AudioDecoder" thread with a decoder
    /// obtained from `make_decoder`; results are delivered through
    /// `callbacks`.
    ///
    /// # Panics
    /// Panics if the decoder thread cannot be spawned.
    pub fn decode_audio_data(&self, data: Vec<u8>, callbacks: AudioDecoderCallbacks) {
        // Decode at this context's sample rate.
        let options = AudioDecoderOptions {
            sample_rate: self.sample_rate,
        };
        let make_decoder = self.make_decoder.clone();
        Builder::new()
            .name("AudioDecoder".to_owned())
            .spawn(move || {
                let audio_decoder = make_decoder();

                audio_decoder.decode(data, callbacks, Some(options));
            })
            .unwrap();
    }

    /// Registers the sink end-of-stream (EOS) callback on the render thread.
    pub fn set_eos_callback(&self, callback: SinkEosCallback) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::SetSinkEosCallback(callback));
    }

    /// Sends a mute/unmute request to the render thread.
    fn set_mute(&self, val: bool) {
        let _ = self.sender.send(AudioRenderThreadMsg::SetMute(val));
    }
}
322
323impl Drop for AudioContext {
324    fn drop(&mut self) {
325        let (tx, _) = mpsc::channel();
326        let _ = self.sender.send(AudioRenderThreadMsg::Close(tx));
327
328        // Ask the backend to unregister this instance and wait for ACK
329        let (tx_ack, rx_ack) = mpsc::channel();
330        let _ = self
331            .backend_chan
332            .lock()
333            .unwrap()
334            .send(BackendMsg::Shutdown {
335                context: self.client_context_id,
336                id: self.id,
337                tx_ack,
338            });
339        let _ = rx_ack.recv();
340    }
341}
342
343impl MediaInstance for AudioContext {
344    fn get_id(&self) -> usize {
345        self.id
346    }
347
348    fn mute(&self, val: bool) -> Result<(), MediaInstanceError> {
349        self.set_mute(val);
350        Ok(())
351    }
352
353    fn suspend(&self) -> Result<(), MediaInstanceError> {
354        let (tx, _) = mpsc::channel();
355        self.sender
356            .send(AudioRenderThreadMsg::Suspend(tx))
357            .map_err(|_| MediaInstanceError)
358    }
359
360    fn resume(&self) -> Result<(), MediaInstanceError> {
361        let (tx, _) = mpsc::channel();
362        self.sender
363            .send(AudioRenderThreadMsg::Resume(tx))
364            .map_err(|_| MediaInstanceError)
365    }
366}