script/dom/audio/
audionode.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::cell::Cell;
6
7use dom_struct::dom_struct;
8use log::warn;
9use script_bindings::codegen::InheritTypes::{
10    AudioNodeTypeId, AudioScheduledSourceNodeTypeId, EventTargetTypeId,
11};
12use servo_media::audio::graph::NodeId;
13use servo_media::audio::node::{
14    AudioNodeInit, AudioNodeMessage, ChannelCountMode as ServoMediaChannelCountMode, ChannelInfo,
15    ChannelInterpretation as ServoMediaChannelInterpretation,
16};
17
18use crate::conversions::Convert;
19use crate::dom::audio::audioparam::AudioParam;
20use crate::dom::audio::baseaudiocontext::BaseAudioContext;
21use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
22    AudioNodeMethods, AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
23};
24use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
25use crate::dom::bindings::inheritance::Castable;
26use crate::dom::bindings::reflector::DomGlobal;
27use crate::dom::bindings::root::{Dom, DomRoot};
28use crate::dom::console::Console;
29use crate::dom::eventtarget::EventTarget;
30
/// Upper bound on the channel count an `AudioNode` may be configured with.
///
/// 32 is the minimum required by the spec for createBuffer() and the deprecated
/// createScriptProcessor() and matches what is used by Blink and Gecko.
/// The limit protects against large memory allocations.
pub(crate) const MAX_CHANNEL_COUNT: u32 = 32;
35
#[dom_struct]
pub(crate) struct AudioNode {
    eventtarget: EventTarget,
    /// Identifier of the corresponding node in the servo-media audio graph.
    /// `None` when backend node creation failed; such a node is inert (all
    /// graph operations on it become no-ops).
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    node_id: Option<NodeId>,
    /// The [`BaseAudioContext`] this node belongs to. Nodes from different
    /// contexts may not be connected to each other.
    context: Dom<BaseAudioContext>,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs>
    number_of_inputs: u32,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs>
    number_of_outputs: u32,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    channel_count: Cell<u32>,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    channel_count_mode: Cell<ChannelCountMode>,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    channel_interpretation: Cell<ChannelInterpretation>,
}
49
impl AudioNode {
    /// Creates the servo-media backend node and the inherited DOM state for it.
    ///
    /// Validates the requested channel count (must be in
    /// `1..=MAX_CHANNEL_COUNT`) before asking the backend to create the node.
    /// If the backend fails to create a node, this does NOT error: the
    /// resulting `AudioNode` is constructed inert (`node_id` is `None`),
    /// matching the behavior of other engines.
    pub(crate) fn new_inherited(
        node_type: AudioNodeInit,
        context: &BaseAudioContext,
        options: UnwrappedAudioNodeOptions,
        number_of_inputs: u32,
        number_of_outputs: u32,
    ) -> Fallible<AudioNode> {
        if options.count == 0 || options.count > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }
        // Translate the DOM-side channel configuration into the
        // servo-media representation. Counts fit in u8 because they are
        // bounded by MAX_CHANNEL_COUNT (32) above.
        let ch = ChannelInfo {
            count: options.count as u8,
            mode: options.mode.convert(),
            interpretation: options.interpretation.convert(),
            context_channel_count: context.channel_count() as u8,
        };
        let node_id = context
            .audio_context_impl()
            .lock()
            .unwrap()
            .create_node(node_type, ch);

        if node_id.is_none() {
            // Follow Chromium and Gecko: we just warn and create an inert AudioNode.
            const MESSAGE: &str =
                "Failed to create an AudioNode backend. The constructed AudioNode will be inert.";
            warn!("{MESSAGE}");
            // Also surface the problem to page authors via the console.
            Console::internal_warn(&context.global(), MESSAGE.to_string());
        }

        Ok(AudioNode::new_inherited_for_id(
            node_id,
            context,
            options,
            number_of_inputs,
            number_of_outputs,
        ))
    }

    /// Builds the inherited DOM state for an already-created (or failed,
    /// when `node_id` is `None`) backend node. No validation is performed
    /// here; callers are expected to have validated `options`.
    pub(crate) fn new_inherited_for_id(
        node_id: Option<NodeId>,
        context: &BaseAudioContext,
        options: UnwrappedAudioNodeOptions,
        number_of_inputs: u32,
        number_of_outputs: u32,
    ) -> AudioNode {
        AudioNode {
            eventtarget: EventTarget::new_inherited(),
            node_id,
            context: Dom::from_ref(context),
            number_of_inputs,
            number_of_outputs,
            channel_count: Cell::new(options.count),
            channel_count_mode: Cell::new(options.mode),
            channel_interpretation: Cell::new(options.interpretation),
        }
    }

    /// Sends a message to this node's backend counterpart.
    /// Silently does nothing for an inert node (no backend id).
    pub(crate) fn message(&self, message: AudioNodeMessage) {
        if let Some(node_id) = self.node_id {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .message_node(node_id, message);
        }
    }

    /// Returns the backend graph id for this node, or `None` if the node
    /// is inert because backend creation failed.
    pub(crate) fn node_id(&self) -> Option<NodeId> {
        self.node_id
    }
}
123
impl AudioNodeMethods<crate::DomTypeHolder> for AudioNode {
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect>
    fn Connect(
        &self,
        destination: &AudioNode,
        output: u32,
        input: u32,
    ) -> Fallible<DomRoot<AudioNode>> {
        // Connections are only allowed between nodes of the same context.
        if self.context != destination.context {
            return Err(Error::InvalidAccess(None));
        }

        // The referenced output and input ports must both exist.
        if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // Either endpoint may be inert (its backend node failed to be
        // created). Nothing can be wired up then, but connect() still
        // returns the destination so chained connect() calls keep working.
        let Some(source_id) = self.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };
        let Some(dest_id) = destination.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(source_id.output(output), dest_id.input(input));

        Ok(DomRoot::from_ref(destination))
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect-destinationparam-output>
    fn Connect_(&self, dest: &AudioParam, output: u32) -> Fallible<()> {
        // The AudioParam must belong to a node of the same context.
        if self.context != dest.context() {
            return Err(Error::InvalidAccess(None));
        }

        if output >= self.NumberOfOutputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // As in Connect(): inert endpoints make this a no-op, not an error.
        let Some(source_id) = self.node_id() else {
            return Ok(());
        };
        let Some(param_node) = dest.node_id() else {
            return Ok(());
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(
                source_id.output(output),
                param_node.param(dest.param_type()),
            );

        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect(&self) -> ErrorResult {
        // An inert node has no backend connections, so there is nothing to do.
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_all_from(node_id);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output>
    fn Disconnect_(&self, out: u32) -> ErrorResult {
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output(node_id.output(out));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode>
    fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
        // Both endpoints need a live backend id for a connection to exist.
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_between(from_node, to_node);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output>
    fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between(from_node.output(out), to_node);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input>
    fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(from_node.output(out), to_node.input(inp));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect_____(&self, param: &AudioParam) -> ErrorResult {
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_to(from_node, param_node.param(param.param_type()));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect______(&self, param: &AudioParam, out: u32) -> ErrorResult {
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(
                    from_node.output(out),
                    param_node.param(param.param_type()),
                );
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-context>
    fn Context(&self) -> DomRoot<BaseAudioContext> {
        DomRoot::from_ref(&self.context)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs>
    fn NumberOfInputs(&self) -> u32 {
        self.number_of_inputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs>
    fn NumberOfOutputs(&self) -> u32 {
        self.number_of_outputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn ChannelCount(&self) -> u32 {
        self.channel_count.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn SetChannelCount(&self, value: u32) -> ErrorResult {
        // Some node types constrain the allowed channel count; enforce the
        // per-type constraints first, as the spec's constraint checks take
        // precedence over the generic range check below.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    return Err(Error::InvalidState(None));
                } else if !(1..=MAX_CHANNEL_COUNT).contains(&value) {
                    return Err(Error::IndexSize(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                if value > 2 {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                if value > 2 {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        // Generic validity check for all node types.
        if value == 0 || value > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }

        // Cast to u8 is safe: value is bounded by MAX_CHANNEL_COUNT (32).
        self.channel_count.set(value);
        self.message(AudioNodeMessage::SetChannelCount(value as u8));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn ChannelCountMode(&self) -> ChannelCountMode {
        self.channel_count_mode.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn SetChannelCountMode(&self, value: ChannelCountMode) -> ErrorResult {
        // Channel count mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // Per-type constraints, mirroring SetChannelCount above.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    return Err(Error::InvalidState(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        self.channel_count_mode.set(value);
        self.message(AudioNodeMessage::SetChannelMode(value.convert()));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn ChannelInterpretation(&self) -> ChannelInterpretation {
        self.channel_interpretation.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn SetChannelInterpretation(&self, value: ChannelInterpretation) -> ErrorResult {
        // Channel interpretation mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // ChannelSplitterNode does not allow changing the interpretation.
        if let EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) =
            self.upcast::<EventTarget>().type_id()
        {
            return Err(Error::InvalidState(None));
        };

        self.channel_interpretation.set(value);
        self.message(AudioNodeMessage::SetChannelInterpretation(value.convert()));
        Ok(())
    }
}
409
410impl Convert<ServoMediaChannelCountMode> for ChannelCountMode {
411    fn convert(self) -> ServoMediaChannelCountMode {
412        match self {
413            ChannelCountMode::Max => ServoMediaChannelCountMode::Max,
414            ChannelCountMode::Clamped_max => ServoMediaChannelCountMode::ClampedMax,
415            ChannelCountMode::Explicit => ServoMediaChannelCountMode::Explicit,
416        }
417    }
418}
419
420impl Convert<ServoMediaChannelInterpretation> for ChannelInterpretation {
421    fn convert(self) -> ServoMediaChannelInterpretation {
422        match self {
423            ChannelInterpretation::Discrete => ServoMediaChannelInterpretation::Discrete,
424            ChannelInterpretation::Speakers => ServoMediaChannelInterpretation::Speakers,
425        }
426    }
427}
428
/// Helper for resolving an `AudioNodeOptions` dictionary against
/// per-node-type default channel settings.
pub(crate) trait AudioNodeOptionsHelper {
    /// Returns the dictionary's values, substituting the given defaults
    /// for any member the author did not specify.
    fn unwrap_or(
        &self,
        count: u32,
        mode: ChannelCountMode,
        interpretation: ChannelInterpretation,
    ) -> UnwrappedAudioNodeOptions;
}
437
438impl AudioNodeOptionsHelper for AudioNodeOptions {
439    fn unwrap_or(
440        &self,
441        count: u32,
442        mode: ChannelCountMode,
443        interpretation: ChannelInterpretation,
444    ) -> UnwrappedAudioNodeOptions {
445        UnwrappedAudioNodeOptions {
446            count: self.channelCount.unwrap_or(count),
447            mode: self.channelCountMode.unwrap_or(mode),
448            interpretation: self.channelInterpretation.unwrap_or(interpretation),
449        }
450    }
451}
452
/// Each node has a set of defaults, so this lets us work with them
/// easily without having to deal with the Options
pub(crate) struct UnwrappedAudioNodeOptions {
    /// Resolved channel count.
    pub(crate) count: u32,
    /// Resolved channel count mode.
    pub(crate) mode: ChannelCountMode,
    /// Resolved channel interpretation.
    pub(crate) interpretation: ChannelInterpretation,
}
460
461impl Default for UnwrappedAudioNodeOptions {
462    fn default() -> Self {
463        UnwrappedAudioNodeOptions {
464            count: 2,
465            mode: ChannelCountMode::Max,
466            interpretation: ChannelInterpretation::Speakers,
467        }
468    }
469}