use decoder::{AudioDecoder, AudioDecoderCallbacks, AudioDecoderOptions};
use graph::{AudioGraph, InputPort, NodeId, OutputPort, PortId};
use node::{AudioNodeInit, AudioNodeMessage, ChannelInfo};
use render_thread::AudioRenderThread;
use render_thread::AudioRenderThreadMsg;
use servo_media_traits::{BackendMsg, ClientContextId, MediaInstance};
use std::cell::Cell;
use std::sync::mpsc::{self, Sender};
use std::sync::{Arc, Mutex};
use std::thread::Builder;
use AudioBackend;

use crate::sink::AudioSinkError;

/// Describes the state of the audio context on the control thread.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ProcessingState {
    /// The audio context is suspended (context time is not proceeding,
    /// audio hardware may be powered down/released).
    Suspended,
    /// Audio is being processed.
    Running,
    /// The audio context has been released, and can no longer be used
    /// to process audio.
    Closed,
}

/// The result of a request to change the processing state of the context.
pub type StateChangeResult = Result<(), ()>;

/// Identifies the type of playback, which affects the tradeoff between audio
/// output latency and power consumption.
#[derive(Copy, Clone)]
pub enum LatencyCategory {
    /// Balance audio output latency and power consumption.
    Balanced,
    /// Provide the lowest audio output latency possible without glitching.
    Interactive,
    /// Prioritize sustained playback without interruption over audio output latency.
    /// Lowest power consumption.
    Playback,
}

/// User-specified options for a real time audio context.
#[derive(Copy, Clone)]
pub struct RealTimeAudioContextOptions {
    /// The sample rate at which audio will be played, in Hz.
    pub sample_rate: f32,
    /// Type of playback.
    pub latency_hint: LatencyCategory,
}

impl Default for RealTimeAudioContextOptions {
    fn default() -> Self {
        Self {
            sample_rate: 44100.,
            latency_hint: LatencyCategory::Interactive,
        }
    }
}

/// User-specified options for an offline audio context.
#[derive(Copy, Clone)]
pub struct OfflineAudioContextOptions {
    /// The number of channels for this offline audio context.
    pub channels: u8,
    /// The length of the rendered audio buffer in sample-frames.
    pub length: usize,
    /// The sample rate at which the buffer will be rendered, in Hz.
    pub sample_rate: f32,
}

impl Default for OfflineAudioContextOptions {
    fn default() -> Self {
        Self {
            channels: 1,
            length: 0,
            sample_rate: 44100.,
        }
    }
}

impl From<RealTimeAudioContextOptions> for AudioContextOptions {
    fn from(options: RealTimeAudioContextOptions) -> Self {
        AudioContextOptions::RealTimeAudioContext(options)
    }
}

impl From<OfflineAudioContextOptions> for AudioContextOptions {
    fn from(options: OfflineAudioContextOptions) -> Self {
        AudioContextOptions::OfflineAudioContext(options)
    }
}

/// User-specified options for a real time or offline audio context.
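///
/// Either options struct converts into this enum. A minimal sketch:
///
/// ```ignore
/// let options: AudioContextOptions = RealTimeAudioContextOptions {
///     sample_rate: 48000.,
///     latency_hint: LatencyCategory::Interactive,
/// }
/// .into();
/// ```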
#[derive(Copy, Clone)]
pub enum AudioContextOptions {
    RealTimeAudioContext(RealTimeAudioContextOptions),
    OfflineAudioContext(OfflineAudioContextOptions),
}

impl Default for AudioContextOptions {
    fn default() -> Self {
        AudioContextOptions::RealTimeAudioContext(Default::default())
    }
}

/// Representation of an audio context on the control thread.
pub struct AudioContext {
    /// Media instance ID.
    id: usize,
    /// Client context ID.
    client_context_id: ClientContextId,
    /// Owner backend communication channel.
    backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
    /// Rendering thread communication channel.
    sender: Sender<AudioRenderThreadMsg>,
    /// State of the audio context on the control thread.
    state: Cell<ProcessingState>,
    /// The sample rate of the context, in Hz.
    sample_rate: f32,
    /// The identifier of an AudioDestinationNode with a single input
    /// representing the final destination for all audio.
    dest_node: NodeId,
    /// The identifier of the node representing the AudioListener.
    listener: NodeId,
    /// Factory function used to create backend-specific audio decoders.
    make_decoder: Arc<(dyn Fn() -> Box<dyn AudioDecoder> + Sync + Send)>,
}

impl AudioContext {
    /// Constructs a new audio context.
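    ///
    /// A minimal sketch, assuming some `Backend` type implementing
    /// `AudioBackend` (not defined in this module):
    ///
    /// ```ignore
    /// let context = AudioContext::new::<Backend>(
    ///     0,
    ///     &client_context_id,
    ///     backend_chan.clone(),
    ///     AudioContextOptions::default(),
    /// )?;
    /// // Contexts start suspended; call `resume()` to begin processing.
    /// assert_eq!(context.state(), ProcessingState::Suspended);
    /// ```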
    pub fn new<B: AudioBackend>(
        id: usize,
        client_context_id: &ClientContextId,
        backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
        options: AudioContextOptions,
    ) -> Result<Self, AudioSinkError> {
        let (sample_rate, channels) = match options {
            // Real time contexts always render to a stereo (2 channel) sink.
            AudioContextOptions::RealTimeAudioContext(ref options) => (options.sample_rate, 2),
            AudioContextOptions::OfflineAudioContext(ref options) => {
                (options.sample_rate, options.channels)
            }
        };

        let (sender, receiver) = mpsc::channel();
        let sender_ = sender.clone();
        let graph = AudioGraph::new(channels);
        let dest_node = graph.dest_id();
        let listener = graph.listener_id();

        let (init_sender, init_receiver) = mpsc::channel();
        Builder::new()
            .name("AudioRenderThread".to_owned())
            .spawn(move || {
                AudioRenderThread::start::<B>(
                    receiver,
                    sender_,
                    sample_rate,
                    graph,
                    options,
                    init_sender,
                )
            })
            .expect("Failed to spawn AudioRenderThread");

        init_receiver
            .recv()
            .expect("Failed to receive result from AudioRenderThread")?;

        Ok(Self {
            id,
            client_context_id: *client_context_id,
            backend_chan,
            sender,
            state: Cell::new(ProcessingState::Suspended),
            sample_rate,
            dest_node,
            listener,
            make_decoder: Arc::new(|| B::make_decoder()),
        })
    }

    pub fn state(&self) -> ProcessingState {
        self.state.get()
    }

    pub fn dest_node(&self) -> NodeId {
        self.dest_node
    }

    pub fn listener(&self) -> NodeId {
        self.listener
    }

    /// Returns the current context time, queried from the render thread.
    pub fn current_time(&self) -> f64 {
        let (tx, rx) = mpsc::channel();
        let _ = self.sender.send(AudioRenderThreadMsg::GetCurrentTime(tx));
        rx.recv().unwrap()
    }

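    /// Creates a new node on the render thread and returns its id.
    ///
    /// A minimal sketch (`GainNode` is one of the `AudioNodeInit`
    /// variants provided by this crate's node module):
    ///
    /// ```ignore
    /// let gain = context.create_node(
    ///     AudioNodeInit::GainNode(Default::default()),
    ///     ChannelInfo::default(),
    /// );
    /// ```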
    pub fn create_node(&self, node_type: AudioNodeInit, ch: ChannelInfo) -> NodeId {
        let (tx, rx) = mpsc::channel();
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::CreateNode(node_type, tx, ch));
        rx.recv().unwrap()
    }

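    // Each `make_state_change!` invocation below expands (the macro is
    // defined elsewhere in this crate) to a public method returning a
    // `StateChangeResult`, updating the control-thread state and sending
    // the corresponding message to the render thread.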
    // Resume audio processing.
    make_state_change!(resume, Running, Resume);

    // Suspend audio processing.
    make_state_change!(suspend, Suspended, Suspend);

    // Stop audio processing and close the render thread.
    make_state_change!(close, Closed, Close);

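    /// Sends a control message to a node on the render thread.
    ///
    /// Sketch with a hypothetical variant; the real message variants are
    /// defined on `AudioNodeMessage` in the node module:
    ///
    /// ```ignore
    /// // `SetChannelCount(2)` is illustrative only.
    /// context.message_node(gain, AudioNodeMessage::SetChannelCount(2));
    /// ```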
    pub fn message_node(&self, id: NodeId, msg: AudioNodeMessage) {
        let _ = self.sender.send(AudioRenderThreadMsg::MessageNode(id, msg));
    }

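    /// Connects a node's output port to another node's input port.
    ///
    /// For example, routing a source node into the destination node
    /// (a sketch; assumes the `output`/`input` port helpers on `NodeId`
    /// from the graph module):
    ///
    /// ```ignore
    /// context.connect_ports(source.output(0), context.dest_node().input(0));
    /// ```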
    pub fn connect_ports(&self, from: PortId<OutputPort>, to: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::ConnectPorts(from, to));
    }

    /// Disconnect all outgoing connections from a node
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
    pub fn disconnect_all_from(&self, node: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectAllFrom(node));
    }

    /// Disconnect all outgoing connections from a node's output
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output
    pub fn disconnect_output(&self, out: PortId<OutputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutput(out));
    }

    /// Disconnect connections from a node to another node
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode
    pub fn disconnect_between(&self, from: NodeId, to: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectBetween(from, to));
    }

    /// Disconnect connections from a node to another node's input
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationparam
    pub fn disconnect_to(&self, from: NodeId, to: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectTo(from, to));
    }

    /// Disconnect all outgoing connections from a node's output to another node
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output
    pub fn disconnect_output_between(&self, out: PortId<OutputPort>, to: NodeId) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutputBetween(out, to));
    }

    /// Disconnect all outgoing connections from a node's output to another node's input
    ///
    /// https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input
    pub fn disconnect_output_between_to(&self, out: PortId<OutputPort>, inp: PortId<InputPort>) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::DisconnectOutputBetweenTo(out, inp));
    }

    /// Asynchronously decodes the audio file data contained in the given
    /// buffer.
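    ///
    /// Completion and progress are reported through `callbacks`. A hedged
    /// sketch, assuming the builder-style `AudioDecoderCallbacks`
    /// constructor from the decoder module:
    ///
    /// ```ignore
    /// let callbacks = AudioDecoderCallbacks::new()
    ///     .progress(|samples, channel| { /* consume decoded samples */ })
    ///     .eos(|| println!("decoding done"))
    ///     .error(|e| eprintln!("decoding failed: {:?}", e))
    ///     .build();
    /// context.decode_audio_data(encoded, callbacks);
    /// ```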
    pub fn decode_audio_data(&self, data: Vec<u8>, callbacks: AudioDecoderCallbacks) {
        let options = AudioDecoderOptions {
            sample_rate: self.sample_rate,
            ..Default::default()
        };
        let make_decoder = self.make_decoder.clone();
        Builder::new()
            .name("AudioDecoder".to_owned())
            .spawn(move || {
                let audio_decoder = make_decoder();

                audio_decoder.decode(data, callbacks, Some(options));
            })
            .unwrap();
    }

    /// Sets a callback to be invoked with the rendered audio when the
    /// sink reaches the end of the stream.
    pub fn set_eos_callback(
        &self,
        callback: Box<dyn Fn(Box<dyn AsRef<[f32]>>) + Send + Sync + 'static>,
    ) {
        let _ = self
            .sender
            .send(AudioRenderThreadMsg::SetSinkEosCallback(callback));
    }

    fn set_mute(&self, val: bool) {
        let _ = self.sender.send(AudioRenderThreadMsg::SetMute(val));
    }
}

impl Drop for AudioContext {
    fn drop(&mut self) {
        let (tx, _) = mpsc::channel();
        let _ = self.sender.send(AudioRenderThreadMsg::Close(tx));
        let _ = self
            .backend_chan
            .lock()
            .unwrap()
            .send(BackendMsg::Shutdown(self.client_context_id, self.id));
    }
}

impl MediaInstance for AudioContext {
    fn get_id(&self) -> usize {
        self.id
    }

    fn mute(&self, val: bool) -> Result<(), ()> {
        self.set_mute(val);
        Ok(())
    }

    fn suspend(&self) -> Result<(), ()> {
        let (tx, _) = mpsc::channel();
        self.sender
            .send(AudioRenderThreadMsg::Suspend(tx))
            .map_err(|_| ())
    }

    fn resume(&self) -> Result<(), ()> {
        let (tx, _) = mpsc::channel();
        self.sender
            .send(AudioRenderThreadMsg::Resume(tx))
            .map_err(|_| ())
    }
}