// script/dom/audio/audionode.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::cell::Cell;
6
7use dom_struct::dom_struct;
8use script_bindings::codegen::InheritTypes::{
9    AudioNodeTypeId, AudioScheduledSourceNodeTypeId, EventTargetTypeId,
10};
11use servo_media::audio::graph::NodeId;
12use servo_media::audio::node::{
13    AudioNodeInit, AudioNodeMessage, ChannelCountMode as ServoMediaChannelCountMode, ChannelInfo,
14    ChannelInterpretation as ServoMediaChannelInterpretation,
15};
16
17use crate::conversions::Convert;
18use crate::dom::audio::audioparam::AudioParam;
19use crate::dom::audio::baseaudiocontext::BaseAudioContext;
20use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
21    AudioNodeMethods, AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
22};
23use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
24use crate::dom::bindings::inheritance::Castable;
25use crate::dom::bindings::root::{Dom, DomRoot};
26use crate::dom::eventtarget::EventTarget;
27
// 32 is the minimum required by the spec for createBuffer() and the deprecated
// createScriptProcessor() and matches what is used by Blink and Gecko.
// The limit protects against large memory allocations.
/// Upper bound used by every channel-count validation in this module
/// (see `AudioNode::new_inherited` and `SetChannelCount`).
pub(crate) const MAX_CHANNEL_COUNT: u32 = 32;
32
/// DOM state shared by every AudioNode subtype.
/// <https://webaudio.github.io/web-audio-api/#audionode>
#[dom_struct]
pub(crate) struct AudioNode {
    eventtarget: EventTarget,
    // Identifier of the backing node in the servo-media audio graph;
    // opaque to the GC/measurement machinery, hence the attributes below.
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    node_id: NodeId,
    // The BaseAudioContext this node belongs to; connections are only
    // allowed between nodes (and params) of the same context.
    context: Dom<BaseAudioContext>,
    // Fixed port counts, set at construction time.
    number_of_inputs: u32,
    number_of_outputs: u32,
    // Mutable channel configuration, adjustable through the
    // channelCount / channelCountMode / channelInterpretation attributes.
    channel_count: Cell<u32>,
    channel_count_mode: Cell<ChannelCountMode>,
    channel_interpretation: Cell<ChannelInterpretation>,
}
46
47impl AudioNode {
48    pub(crate) fn new_inherited(
49        node_type: AudioNodeInit,
50        context: &BaseAudioContext,
51        options: UnwrappedAudioNodeOptions,
52        number_of_inputs: u32,
53        number_of_outputs: u32,
54    ) -> Fallible<AudioNode> {
55        if options.count == 0 || options.count > MAX_CHANNEL_COUNT {
56            return Err(Error::NotSupported);
57        }
58        let ch = ChannelInfo {
59            count: options.count as u8,
60            mode: options.mode.convert(),
61            interpretation: options.interpretation.convert(),
62            context_channel_count: context.channel_count() as u8,
63        };
64        let node_id = context
65            .audio_context_impl()
66            .lock()
67            .unwrap()
68            .create_node(node_type, ch);
69        Ok(AudioNode::new_inherited_for_id(
70            node_id,
71            context,
72            options,
73            number_of_inputs,
74            number_of_outputs,
75        ))
76    }
77
78    pub(crate) fn new_inherited_for_id(
79        node_id: NodeId,
80        context: &BaseAudioContext,
81        options: UnwrappedAudioNodeOptions,
82        number_of_inputs: u32,
83        number_of_outputs: u32,
84    ) -> AudioNode {
85        AudioNode {
86            eventtarget: EventTarget::new_inherited(),
87            node_id,
88            context: Dom::from_ref(context),
89            number_of_inputs,
90            number_of_outputs,
91            channel_count: Cell::new(options.count),
92            channel_count_mode: Cell::new(options.mode),
93            channel_interpretation: Cell::new(options.interpretation),
94        }
95    }
96
97    pub(crate) fn message(&self, message: AudioNodeMessage) {
98        self.context
99            .audio_context_impl()
100            .lock()
101            .unwrap()
102            .message_node(self.node_id, message);
103    }
104
105    pub(crate) fn node_id(&self) -> NodeId {
106        self.node_id
107    }
108}
109
// Bindings entry points for the methods and attributes of
// https://webaudio.github.io/web-audio-api/#audionode
impl AudioNodeMethods<crate::DomTypeHolder> for AudioNode {
    // https://webaudio.github.io/web-audio-api/#dom-audionode-connect
    fn Connect(
        &self,
        destination: &AudioNode,
        output: u32,
        input: u32,
    ) -> Fallible<DomRoot<AudioNode>> {
        // Nodes belonging to different BaseAudioContexts cannot be connected.
        if self.context != destination.context {
            return Err(Error::InvalidAccess);
        }

        // Both port indices must be in range for their respective nodes.
        if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
            return Err(Error::IndexSize);
        }

        // servo-media takes care of ignoring duplicated connections.

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(
                self.node_id().output(output),
                destination.node_id().input(input),
            );

        // Per spec, the destination node is returned to allow chaining.
        Ok(DomRoot::from_ref(destination))
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-connect-destinationparam-output
    fn Connect_(&self, dest: &AudioParam, output: u32) -> Fallible<()> {
        // An AudioParam may only be driven by a node of the same context.
        if self.context != dest.context() {
            return Err(Error::InvalidAccess);
        }

        if output >= self.NumberOfOutputs() {
            return Err(Error::IndexSize);
        }

        // servo-media takes care of ignoring duplicated connections.

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(
                self.node_id().output(output),
                dest.node_id().param(dest.param_type()),
            );

        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
    // Overload: disconnect() — drops every outgoing connection of this node.
    fn Disconnect(&self) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_all_from(self.node_id());
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output
    // Overload: disconnect(output) — drops connections from one output port.
    // NOTE(review): the spec mandates an IndexSize error for an out-of-range
    // output index; no such validation happens here or in the overloads
    // below — confirm whether servo-media handles it.
    fn Disconnect_(&self, out: u32) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_output(self.node_id().output(out));
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode
    // Overload: disconnect(destinationNode).
    fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_between(self.node_id(), to.node_id());
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output
    // Overload: disconnect(destinationNode, output).
    fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_output_between(self.node_id().output(out), to.node_id());
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input
    // Overload: disconnect(destinationNode, output, input).
    fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_output_between_to(self.node_id().output(out), to.node_id().input(inp));
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
    // Overload: disconnect(destinationParam).
    fn Disconnect_____(&self, param: &AudioParam) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_to(self.node_id(), param.node_id().param(param.param_type()));
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect
    // Overload: disconnect(destinationParam, output).
    fn Disconnect______(&self, param: &AudioParam, out: u32) -> ErrorResult {
        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .disconnect_output_between_to(
                self.node_id().output(out),
                param.node_id().param(param.param_type()),
            );
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-context
    fn Context(&self) -> DomRoot<BaseAudioContext> {
        DomRoot::from_ref(&self.context)
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs
    fn NumberOfInputs(&self) -> u32 {
        self.number_of_inputs
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs
    fn NumberOfOutputs(&self) -> u32 {
        self.number_of_outputs
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount
    fn ChannelCount(&self) -> u32 {
        self.channel_count.get()
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount
    fn SetChannelCount(&self, value: u32) -> ErrorResult {
        // Node-type-specific constraints are checked before the generic
        // range check below so the more specific error (e.g. InvalidState)
        // takes precedence, matching the spec's per-node behavior tables.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                // The destination of an offline context has a fixed count;
                // online destinations accept 1..=MAX_CHANNEL_COUNT.
                if self.context.is_offline() {
                    return Err(Error::InvalidState);
                } else if !(1..=MAX_CHANNEL_COUNT).contains(&value) {
                    return Err(Error::IndexSize);
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                // Panner nodes are restricted to mono or stereo.
                if value > 2 {
                    return Err(Error::NotSupported);
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                // Stereo panner nodes are likewise restricted to 1 or 2.
                if value > 2 {
                    return Err(Error::NotSupported);
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                // Channel count is fixed for mergers.
                return Err(Error::InvalidState);
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                // Channel count is fixed for splitters.
                return Err(Error::InvalidState);
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        // Generic bound applying to every node type.
        if value == 0 || value > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported);
        }

        // Record the new count and propagate it to the servo-media node.
        self.channel_count.set(value);
        self.message(AudioNodeMessage::SetChannelCount(value as u8));
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode
    fn ChannelCountMode(&self) -> ChannelCountMode {
        self.channel_count_mode.get()
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode
    fn SetChannelCountMode(&self, value: ChannelCountMode) -> ErrorResult {
        // Channel count mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // As in SetChannelCount, node-specific constraints come first.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    return Err(Error::InvalidState);
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                // "max" mode is disallowed for panners.
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported);
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                // "max" mode is disallowed for stereo panners as well.
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported);
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                // Count mode is fixed for mergers.
                return Err(Error::InvalidState);
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                // Count mode is fixed for splitters.
                return Err(Error::InvalidState);
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        // Record the new mode and propagate it to the servo-media node.
        self.channel_count_mode.set(value);
        self.message(AudioNodeMessage::SetChannelMode(value.convert()));
        Ok(())
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation
    fn ChannelInterpretation(&self) -> ChannelInterpretation {
        self.channel_interpretation.get()
    }

    // https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation
    fn SetChannelInterpretation(&self, value: ChannelInterpretation) -> ErrorResult {
        // Channel interpretation mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // Splitters have a fixed interpretation; changing it is an error.
        if let EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) =
            self.upcast::<EventTarget>().type_id()
        {
            return Err(Error::InvalidState);
        };

        // Record the new interpretation and propagate it downstream.
        self.channel_interpretation.set(value);
        self.message(AudioNodeMessage::SetChannelInterpretation(value.convert()));
        Ok(())
    }
}
370
371impl Convert<ServoMediaChannelCountMode> for ChannelCountMode {
372    fn convert(self) -> ServoMediaChannelCountMode {
373        match self {
374            ChannelCountMode::Max => ServoMediaChannelCountMode::Max,
375            ChannelCountMode::Clamped_max => ServoMediaChannelCountMode::ClampedMax,
376            ChannelCountMode::Explicit => ServoMediaChannelCountMode::Explicit,
377        }
378    }
379}
380
381impl Convert<ServoMediaChannelInterpretation> for ChannelInterpretation {
382    fn convert(self) -> ServoMediaChannelInterpretation {
383        match self {
384            ChannelInterpretation::Discrete => ServoMediaChannelInterpretation::Discrete,
385            ChannelInterpretation::Speakers => ServoMediaChannelInterpretation::Speakers,
386        }
387    }
388}
389
/// Helper for extracting concrete channel settings out of the
/// bindings-generated `AudioNodeOptions` dictionary.
pub(crate) trait AudioNodeOptionsHelper {
    /// Returns the dictionary's channel settings, substituting the
    /// provided defaults for any member the author left unset.
    fn unwrap_or(
        &self,
        count: u32,
        mode: ChannelCountMode,
        interpretation: ChannelInterpretation,
    ) -> UnwrappedAudioNodeOptions;
}
398
399impl AudioNodeOptionsHelper for AudioNodeOptions {
400    fn unwrap_or(
401        &self,
402        count: u32,
403        mode: ChannelCountMode,
404        interpretation: ChannelInterpretation,
405    ) -> UnwrappedAudioNodeOptions {
406        UnwrappedAudioNodeOptions {
407            count: self.channelCount.unwrap_or(count),
408            mode: self.channelCountMode.unwrap_or(mode),
409            interpretation: self.channelInterpretation.unwrap_or(interpretation),
410        }
411    }
412}
413
/// Each node has a set of defaults, so this lets us work with them
/// easily without having to deal with the Options
pub(crate) struct UnwrappedAudioNodeOptions {
    // Requested channel count; validated against MAX_CHANNEL_COUNT at
    // node-creation time.
    pub(crate) count: u32,
    // Resolved channelCountMode value for the node.
    pub(crate) mode: ChannelCountMode,
    // Resolved channelInterpretation value for the node.
    pub(crate) interpretation: ChannelInterpretation,
}
421
422impl Default for UnwrappedAudioNodeOptions {
423    fn default() -> Self {
424        UnwrappedAudioNodeOptions {
425            count: 2,
426            mode: ChannelCountMode::Max,
427            interpretation: ChannelInterpretation::Speakers,
428        }
429    }
430}