// servo_media_audio/context.rs

use crate::decoder::{AudioDecoder, AudioDecoderCallbacks, AudioDecoderOptions};
use crate::graph::{AudioGraph, InputPort, NodeId, OutputPort, PortId};
use crate::node::{AudioNodeInit, AudioNodeMessage, ChannelInfo};
use crate::render_thread::{AudioRenderThread, AudioRenderThreadMsg};
use crate::sink::AudioSinkError;
use crate::AudioBackend;
use servo_media_traits::{BackendMsg, ClientContextId, MediaInstance};
use std::cell::Cell;
use std::sync::mpsc::{self, Sender};
use std::sync::{Arc, Mutex};
use std::thread::Builder;

/// Describes the state of the audio context on the control thread.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ProcessingState {
    /// The audio context is suspended (context time is not proceeding,
    /// audio hardware may be powered down/released).
    Suspended,
    /// Audio is being processed.
    Running,
    /// The audio context has been released, and can no longer be used
    /// to process audio.
    Closed,
}

pub type StateChangeResult = Result<(), ()>;

/// Identifies the type of playback, which affects tradeoffs between audio
/// output latency and power consumption.
#[derive(Copy, Clone)]
pub enum LatencyCategory {
    /// Balance audio output latency and power consumption.
    Balanced,
    /// Provide the lowest audio output latency possible without glitching.
    Interactive,
    /// Prioritize sustained playback without interruption over audio output latency.
    /// Lowest power consumption.
    Playback,
}
42
43/// User-specified options for a real time audio context.
44#[derive(Copy, Clone)]
45pub struct RealTimeAudioContextOptions {
46    /// Number of samples that will play in one second, measured in Hz.
47    pub sample_rate: f32,
48    /// Type of playback.
49    pub latency_hint: LatencyCategory,
50}
51
52impl Default for RealTimeAudioContextOptions {
53    fn default() -> Self {
54        Self {
55            sample_rate: 44100.,
56            latency_hint: LatencyCategory::Interactive,
57        }
58    }
59}

/// User-specified options for an offline audio context.
#[derive(Copy, Clone)]
pub struct OfflineAudioContextOptions {
    /// The number of channels for this offline audio context.
    pub channels: u8,
    /// The length of the rendered audio buffer, in sample-frames.
    pub length: usize,
    /// Number of samples rendered per second, in Hz.
    pub sample_rate: f32,
}

impl Default for OfflineAudioContextOptions {
    fn default() -> Self {
        Self {
            channels: 1,
            length: 0,
            sample_rate: 44100.,
        }
    }
}

impl From<RealTimeAudioContextOptions> for AudioContextOptions {
    fn from(options: RealTimeAudioContextOptions) -> Self {
        AudioContextOptions::RealTimeAudioContext(options)
    }
}

impl From<OfflineAudioContextOptions> for AudioContextOptions {
    fn from(options: OfflineAudioContextOptions) -> Self {
        AudioContextOptions::OfflineAudioContext(options)
    }
}

/// User-specified options for a real time or offline audio context.
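///
/// Both option structs convert into this enum via the `From` impls above, so
/// either can be passed wherever an `AudioContextOptions` is expected:
///
/// ```ignore
/// let real_time: AudioContextOptions = RealTimeAudioContextOptions::default().into();
/// let offline: AudioContextOptions = OfflineAudioContextOptions::default().into();
/// ```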
#[derive(Copy, Clone)]
pub enum AudioContextOptions {
    RealTimeAudioContext(RealTimeAudioContextOptions),
    OfflineAudioContext(OfflineAudioContextOptions),
}

impl Default for AudioContextOptions {
    fn default() -> Self {
        AudioContextOptions::RealTimeAudioContext(Default::default())
    }
}

/// Representation of an audio context on the control thread.
pub struct AudioContext {
    /// Media instance ID.
    id: usize,
    /// Client context ID.
    client_context_id: ClientContextId,
    /// Owner backend communication channel.
    backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
    /// Rendering thread communication channel.
    sender: Sender<AudioRenderThreadMsg>,
    /// State of the audio context on the control thread.
    state: Cell<ProcessingState>,
    /// Number of samples played per second, in Hz.
    sample_rate: f32,
    /// The identifier of an AudioDestinationNode with a single input
    /// representing the final destination for all audio.
    dest_node: NodeId,
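    /// The identifier of the listener node, which represents the position and
    /// orientation of the listener for spatialized audio.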
    listener: NodeId,
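    /// Factory for backend-specific audio decoders, used by `decode_audio_data`.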
    make_decoder: Arc<(dyn Fn() -> Box<dyn AudioDecoder> + Sync + Send)>,
}

impl AudioContext {
    /// Constructs a new audio context.
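    ///
    /// Spawns the `AudioRenderThread` that will process the audio graph for
    /// this context; if the render thread fails to set up its audio sink, the
    /// corresponding `AudioSinkError` is returned.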
    pub fn new<B: AudioBackend>(
        id: usize,
        client_context_id: &ClientContextId,
        backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
        options: AudioContextOptions,
    ) -> Result<Self, AudioSinkError> {
        let (sample_rate, channels) = match options {
            AudioContextOptions::RealTimeAudioContext(ref options) => (options.sample_rate, 2),
            AudioContextOptions::OfflineAudioContext(ref options) => {
                (options.sample_rate, options.channels)
            }
        };

        let (sender, receiver) = mpsc::channel();
        let sender_ = sender.clone();
        let graph = AudioGraph::new(channels);
        let dest_node = graph.dest_id();
        let listener = graph.listener_id();

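        // The render thread reports the result of creating its audio sink
        // through this channel.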
        let (init_sender, init_receiver) = mpsc::channel();
        Builder::new()
            .name("AudioRenderThread".to_owned())
            .spawn(move || {
                AudioRenderThread::start::<B>(
                    receiver,
                    sender_,
                    sample_rate,
                    graph,
                    options,
                    init_sender,
                )
            })
            .expect("Failed to spawn AudioRenderThread");

        init_receiver
            .recv()
            .expect("Failed to receive result from AudioRenderThread")?;

        Ok(Self {
            id,
            client_context_id: *client_context_id,
            backend_chan,
            sender,
            state: Cell::new(ProcessingState::Suspended),
            sample_rate,
            dest_node,
            listener,
            make_decoder: Arc::new(|| B::make_decoder()),
        })
    }

    pub fn state(&self) -> ProcessingState {
        self.state.get()
    }

    pub fn dest_node(&self) -> NodeId {
        self.dest_node
    }

    pub fn listener(&self) -> NodeId {
        self.listener
    }

    pub fn current_time(&self) -> f64 {
        let (tx, rx) = mpsc::channel();
        let _ = self.sender.send(AudioRenderThreadMsg::GetCurrentTime(tx));
        rx.recv().unwrap()
    }

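    /// Creates a new node on the render thread and returns its `NodeId`.
    ///
    /// A minimal sketch of a typical setup, assuming an oscillator variant of
    /// `AudioNodeInit` and the `output`/`input` port helpers on `NodeId`:
    ///
    /// ```ignore
    /// let osc = context.create_node(
    ///     AudioNodeInit::OscillatorNode(Default::default()),
    ///     ChannelInfo::default(),
    /// );
    /// // Route the oscillator into the destination node and start playback.
    /// context.connect_ports(osc.output(0), context.dest_node().input(0));
    /// let _ = context.resume();
    /// ```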
    pub fn create_node(&self, node_type: AudioNodeInit, ch: ChannelInfo) -> NodeId {
        let (tx, rx) = mpsc::channel();
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::CreateNode(node_type, tx, ch));
        rx.recv().unwrap()
    }

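    // Each make_state_change!(method, State, Msg) invocation below generates a
    // state-transition method (e.g. `resume`) that updates the cached
    // ProcessingState on the control thread and sends the matching
    // AudioRenderThreadMsg to the render thread; see the macro definition in
    // this crate.
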
    // Resume audio processing.
    make_state_change!(resume, Running, Resume);

    // Suspend audio processing.
    make_state_change!(suspend, Suspended, Suspend);

    // Stop audio processing and close render thread.
    make_state_change!(close, Closed, Close);

    pub fn message_node(&self, id: NodeId, msg: AudioNodeMessage) {
        let _ = self.sender.send(AudioRenderThreadMsg::MessageNode(id, msg));
    }

    pub fn connect_ports(&self, from: PortId<OutputPort>, to: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::ConnectPorts(from, to));
    }

    pub fn disconnect_all_from(&self, node: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectAllFrom(node));
    }

    /// Disconnect all outgoing connections from a node's output
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output
    pub fn disconnect_output(&self, out: PortId<OutputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutput(out));
    }

    /// Disconnect connections from a node to another node
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode
    pub fn disconnect_between(&self, from: NodeId, to: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectBetween(from, to));
    }

    /// Disconnect connections from a node to another node's input
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationparam
    pub fn disconnect_to(&self, from: NodeId, to: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectTo(from, to));
    }

    /// Disconnect all outgoing connections from a node's output to another node
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output
    pub fn disconnect_output_between(&self, out: PortId<OutputPort>, to: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutputBetween(out, to));
    }

    /// Disconnect all outgoing connections from a node's output to another node's input
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input
    pub fn disconnect_output_between_to(&self, out: PortId<OutputPort>, inp: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutputBetweenTo(out, inp));
    }

    /// Asynchronously decodes the audio file data contained in the given
    /// buffer.
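    ///
    /// Decoding runs on a dedicated thread and results are delivered through
    /// `callbacks`. A rough sketch, assuming a builder-style constructor on
    /// `AudioDecoderCallbacks` (see `decoder.rs` for the actual API):
    ///
    /// ```ignore
    /// let callbacks = AudioDecoderCallbacks::new()
    ///     .ready(|channel_count| { /* allocate per-channel storage */ })
    ///     .progress(|samples, channel| { /* accumulate decoded samples */ })
    ///     .eos(|| { /* decoding finished */ })
    ///     .error(|err| { /* decoding failed */ })
    ///     .build();
    /// context.decode_audio_data(encoded_bytes, callbacks);
    /// ```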
    pub fn decode_audio_data(&self, data: Vec<u8>, callbacks: AudioDecoderCallbacks) {
        let mut options = AudioDecoderOptions::default();
        options.sample_rate = self.sample_rate;
        let make_decoder = self.make_decoder.clone();
        Builder::new()
            .name("AudioDecoder".to_owned())
            .spawn(move || {
                let audio_decoder = make_decoder();

                audio_decoder.decode(data, callbacks, Some(options));
            })
            .expect("Failed to spawn AudioDecoder thread");
    }

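    /// Registers a callback to be invoked by the audio sink when it reaches
    /// end of stream; the callback receives the sink's rendered samples.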
    pub fn set_eos_callback(
        &self,
        callback: Box<dyn Fn(Box<dyn AsRef<[f32]>>) + Send + Sync + 'static>,
    ) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::SetSinkEosCallback(callback));
    }

    fn set_mute(&self, val: bool) {
        let _ = self.sender.send(AudioRenderThreadMsg::SetMute(val));
    }
}

impl Drop for AudioContext {
    fn drop(&mut self) {
        let (tx, _) = mpsc::channel();
        let _ = self.sender.send(AudioRenderThreadMsg::Close(tx));

        // Ask the backend to unregister this instance and wait for ACK.
        let (tx_ack, rx_ack) = mpsc::channel();
        let _ = self
            .backend_chan
            .lock()
            .unwrap()
            .send(BackendMsg::Shutdown {
                context: self.client_context_id,
                id: self.id,
                tx_ack,
            });
        let _ = rx_ack.recv();
    }
}

impl MediaInstance for AudioContext {
    fn get_id(&self) -> usize {
        self.id
    }

    fn mute(&self, val: bool) -> Result<(), ()> {
        self.set_mute(val);
        Ok(())
    }

    fn suspend(&self) -> Result<(), ()> {
        let (tx, _) = mpsc::channel();
        self.sender
            .send(AudioRenderThreadMsg::Suspend(tx))
            .map_err(|_| ())
    }

    fn resume(&self) -> Result<(), ()> {
        let (tx, _) = mpsc::channel();
        self.sender
            .send(AudioRenderThreadMsg::Resume(tx))
            .map_err(|_| ())
    }
}