// script/dom/audio/audionode.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::cell::Cell;
6
7use dom_struct::dom_struct;
8use log::warn;
9use script_bindings::codegen::InheritTypes::{
10    AudioNodeTypeId, AudioScheduledSourceNodeTypeId, EventTargetTypeId,
11};
12use servo_media::audio::graph::NodeId;
13use servo_media::audio::node::{
14    AudioNodeInit, AudioNodeMessage, ChannelCountMode as ServoMediaChannelCountMode, ChannelInfo,
15    ChannelInterpretation as ServoMediaChannelInterpretation,
16};
17
18use crate::conversions::Convert;
19use crate::dom::audio::audioparam::AudioParam;
20use crate::dom::audio::baseaudiocontext::BaseAudioContext;
21use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
22    AudioNodeMethods, AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
23};
24use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
25use crate::dom::bindings::inheritance::Castable;
26use crate::dom::bindings::reflector::DomGlobal;
27use crate::dom::bindings::root::{Dom, DomRoot};
28use crate::dom::bindings::str::DOMString;
29use crate::dom::console::Console;
30use crate::dom::eventtarget::EventTarget;
31
/// Maximum number of channels supported per `AudioNode`.
///
/// 32 is the minimum required by the spec for createBuffer() and the deprecated
/// createScriptProcessor() and matches what is used by Blink and Gecko.
/// The limit protects against large memory allocations.
pub(crate) const MAX_CHANNEL_COUNT: u32 = 32;
36
#[dom_struct]
pub(crate) struct AudioNode {
    eventtarget: EventTarget,
    /// Id of the backing servo-media node. `None` when backend creation
    /// failed, in which case this DOM node is inert (see `new_inherited`).
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    node_id: Option<NodeId>,
    /// The `BaseAudioContext` this node belongs to; connections are only
    /// allowed between nodes of the same context.
    context: Dom<BaseAudioContext>,
    number_of_inputs: u32,
    number_of_outputs: u32,
    // The three mutable channel attributes are `Cell`s because the WebIDL
    // setters take `&self`.
    channel_count: Cell<u32>,
    channel_count_mode: Cell<ChannelCountMode>,
    channel_interpretation: Cell<ChannelInterpretation>,
}
50
51impl AudioNode {
52    pub(crate) fn new_inherited(
53        node_type: AudioNodeInit,
54        context: &BaseAudioContext,
55        options: UnwrappedAudioNodeOptions,
56        number_of_inputs: u32,
57        number_of_outputs: u32,
58    ) -> Fallible<AudioNode> {
59        if options.count == 0 || options.count > MAX_CHANNEL_COUNT {
60            return Err(Error::NotSupported(None));
61        }
62        let ch = ChannelInfo {
63            count: options.count as u8,
64            mode: options.mode.convert(),
65            interpretation: options.interpretation.convert(),
66            context_channel_count: context.channel_count() as u8,
67        };
68        let node_id = match context
69            .audio_context_impl()
70            .lock()
71            .unwrap()
72            .create_node(node_type, ch)
73        {
74            Ok(node_id) => Some(node_id),
75            Err(_) => {
76                // Follow Chromuim and Gecko, we just warn and create an inert AudioNode.
77                const MESSAGE: &str = "Failed to create an AudioNode backend. The constructed AudioNode will be inert.";
78                warn!("{MESSAGE}");
79                Console::internal_warn(&context.global(), DOMString::from(MESSAGE));
80                None
81            },
82        };
83
84        Ok(AudioNode::new_inherited_for_id(
85            node_id,
86            context,
87            options,
88            number_of_inputs,
89            number_of_outputs,
90        ))
91    }
92
93    pub(crate) fn new_inherited_for_id(
94        node_id: Option<NodeId>,
95        context: &BaseAudioContext,
96        options: UnwrappedAudioNodeOptions,
97        number_of_inputs: u32,
98        number_of_outputs: u32,
99    ) -> AudioNode {
100        AudioNode {
101            eventtarget: EventTarget::new_inherited(),
102            node_id,
103            context: Dom::from_ref(context),
104            number_of_inputs,
105            number_of_outputs,
106            channel_count: Cell::new(options.count),
107            channel_count_mode: Cell::new(options.mode),
108            channel_interpretation: Cell::new(options.interpretation),
109        }
110    }
111
112    pub(crate) fn message(&self, message: AudioNodeMessage) {
113        if let Some(node_id) = self.node_id {
114            self.context
115                .audio_context_impl()
116                .lock()
117                .unwrap()
118                .message_node(node_id, message);
119        }
120    }
121
122    pub(crate) fn node_id(&self) -> Option<NodeId> {
123        self.node_id
124    }
125}
126
impl AudioNodeMethods<crate::DomTypeHolder> for AudioNode {
    // NOTE: trailing underscores (Connect_, Disconnect_, ...) are how the
    // WebIDL codegen disambiguates the overloads of connect()/disconnect().

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect>
    fn Connect(
        &self,
        destination: &AudioNode,
        output: u32,
        input: u32,
    ) -> Fallible<DomRoot<AudioNode>> {
        // Nodes from different contexts cannot be connected.
        if self.context != destination.context {
            return Err(Error::InvalidAccess(None));
        }

        // Both port indices must refer to existing ports.
        if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // An inert node (no backend id) cannot take part in the graph;
        // connecting is then a no-op that still returns the destination.
        let Some(source_id) = self.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };
        let Some(dest_id) = destination.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(source_id.output(output), dest_id.input(input));

        // Returning the destination enables call chaining.
        Ok(DomRoot::from_ref(destination))
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect-destinationparam-output>
    fn Connect_(&self, dest: &AudioParam, output: u32) -> Fallible<()> {
        // The AudioParam must belong to a node of the same context.
        if self.context != dest.context() {
            return Err(Error::InvalidAccess(None));
        }

        if output >= self.NumberOfOutputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // No-op when either endpoint is inert (no backend id).
        let Some(source_id) = self.node_id() else {
            return Ok(());
        };
        let Some(param_node) = dest.node_id() else {
            return Ok(());
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(
                source_id.output(output),
                param_node.param(dest.param_type()),
            );

        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect(&self) -> ErrorResult {
        // Drop every outgoing connection; an inert node has none.
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_all_from(node_id);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output>
    fn Disconnect_(&self, out: u32) -> ErrorResult {
        // Drop all connections leaving output port `out`.
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output(node_id.output(out));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode>
    fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
        // Drop all connections from this node to `to`, regardless of port.
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_between(from_node, to_node);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output>
    fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
        // Drop connections from this node's output `out` to any input of `to`.
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between(from_node.output(out), to_node);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input>
    fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
        // Drop the specific connection output `out` -> input `inp` of `to`.
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(from_node.output(out), to_node.input(inp));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect_____(&self, param: &AudioParam) -> ErrorResult {
        // Drop all connections from this node to the given AudioParam.
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_to(from_node, param_node.param(param.param_type()));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect______(&self, param: &AudioParam, out: u32) -> ErrorResult {
        // Drop connections from output `out` to the given AudioParam.
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(
                    from_node.output(out),
                    param_node.param(param.param_type()),
                );
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-context>
    fn Context(&self) -> DomRoot<BaseAudioContext> {
        DomRoot::from_ref(&self.context)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs>
    fn NumberOfInputs(&self) -> u32 {
        self.number_of_inputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs>
    fn NumberOfOutputs(&self) -> u32 {
        self.number_of_outputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn ChannelCount(&self) -> u32 {
        self.channel_count.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn SetChannelCount(&self, value: u32) -> ErrorResult {
        // Some node types constrain channelCount beyond the generic
        // 1..=MAX_CHANNEL_COUNT range checked after this match.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                // An OfflineAudioContext destination has a fixed channel count.
                if self.context.is_offline() {
                    return Err(Error::InvalidState(None));
                } else if !(1..=MAX_CHANNEL_COUNT).contains(&value) {
                    return Err(Error::IndexSize(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                // Panner output is at most stereo.
                if value > 2 {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                // Stereo panner output is at most stereo.
                if value > 2 {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                // channelCount is fixed for mergers.
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                // channelCount is fixed for splitters.
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        // Generic range check shared by all node types.
        if value == 0 || value > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }

        self.channel_count.set(value);
        // Keep the backend (if any) in sync with the DOM-side value.
        self.message(AudioNodeMessage::SetChannelCount(value as u8));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn ChannelCountMode(&self) -> ChannelCountMode {
        self.channel_count_mode.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn SetChannelCountMode(&self, value: ChannelCountMode) -> ErrorResult {
        // Channel count mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // Per-type constraints, mirroring SetChannelCount above.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    return Err(Error::InvalidState(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        self.channel_count_mode.set(value);
        // Keep the backend (if any) in sync with the DOM-side value.
        self.message(AudioNodeMessage::SetChannelMode(value.convert()));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn ChannelInterpretation(&self) -> ChannelInterpretation {
        self.channel_interpretation.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn SetChannelInterpretation(&self, value: ChannelInterpretation) -> ErrorResult {
        // Channel interpretation mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // Splitters have a fixed interpretation.
        if let EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) =
            self.upcast::<EventTarget>().type_id()
        {
            return Err(Error::InvalidState(None));
        };

        self.channel_interpretation.set(value);
        // Keep the backend (if any) in sync with the DOM-side value.
        self.message(AudioNodeMessage::SetChannelInterpretation(value.convert()));
        Ok(())
    }
}
412
/// Maps the DOM-facing `ChannelCountMode` enum onto servo-media's equivalent.
impl Convert<ServoMediaChannelCountMode> for ChannelCountMode {
    fn convert(self) -> ServoMediaChannelCountMode {
        match self {
            ChannelCountMode::Max => ServoMediaChannelCountMode::Max,
            ChannelCountMode::Clamped_max => ServoMediaChannelCountMode::ClampedMax,
            ChannelCountMode::Explicit => ServoMediaChannelCountMode::Explicit,
        }
    }
}
422
/// Maps the DOM-facing `ChannelInterpretation` enum onto servo-media's equivalent.
impl Convert<ServoMediaChannelInterpretation> for ChannelInterpretation {
    fn convert(self) -> ServoMediaChannelInterpretation {
        match self {
            ChannelInterpretation::Discrete => ServoMediaChannelInterpretation::Discrete,
            ChannelInterpretation::Speakers => ServoMediaChannelInterpretation::Speakers,
        }
    }
}
431
/// Helper for resolving an `AudioNodeOptions` dictionary against a node
/// type's defaults.
pub(crate) trait AudioNodeOptionsHelper {
    /// Returns the dictionary's values, substituting the given defaults for
    /// any member the caller did not provide.
    fn unwrap_or(
        &self,
        count: u32,
        mode: ChannelCountMode,
        interpretation: ChannelInterpretation,
    ) -> UnwrappedAudioNodeOptions;
}
440
441impl AudioNodeOptionsHelper for AudioNodeOptions {
442    fn unwrap_or(
443        &self,
444        count: u32,
445        mode: ChannelCountMode,
446        interpretation: ChannelInterpretation,
447    ) -> UnwrappedAudioNodeOptions {
448        UnwrappedAudioNodeOptions {
449            count: self.channelCount.unwrap_or(count),
450            mode: self.channelCountMode.unwrap_or(mode),
451            interpretation: self.channelInterpretation.unwrap_or(interpretation),
452        }
453    }
454}
455
/// Each node has a set of defaults, so this lets us work with them
/// easily without having to deal with the Options
pub(crate) struct UnwrappedAudioNodeOptions {
    /// Resolved channelCount value.
    pub(crate) count: u32,
    /// Resolved channelCountMode value.
    pub(crate) mode: ChannelCountMode,
    /// Resolved channelInterpretation value.
    pub(crate) interpretation: ChannelInterpretation,
}
463
464impl Default for UnwrappedAudioNodeOptions {
465    fn default() -> Self {
466        UnwrappedAudioNodeOptions {
467            count: 2,
468            mode: ChannelCountMode::Max,
469            interpretation: ChannelInterpretation::Speakers,
470        }
471    }
472}