// script/dom/audio/audionode.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::cell::Cell;
6
7use dom_struct::dom_struct;
8use log::warn;
9use script_bindings::codegen::InheritTypes::{
10    AudioNodeTypeId, AudioScheduledSourceNodeTypeId, EventTargetTypeId,
11};
12use servo_media::audio::graph::NodeId;
13use servo_media::audio::node::{
14    AudioNodeInit, AudioNodeMessage, ChannelCountMode as ServoMediaChannelCountMode, ChannelInfo,
15    ChannelInterpretation as ServoMediaChannelInterpretation,
16};
17
18use crate::conversions::Convert;
19use crate::dom::audio::audioparam::AudioParam;
20use crate::dom::audio::baseaudiocontext::BaseAudioContext;
21use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
22    AudioNodeMethods, AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
23};
24use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
25use crate::dom::bindings::inheritance::Castable;
26use crate::dom::bindings::reflector::DomGlobal;
27use crate::dom::bindings::root::{Dom, DomRoot};
28use crate::dom::bindings::str::DOMString;
29use crate::dom::console::Console;
30use crate::dom::eventtarget::EventTarget;
31
// 32 is the minimum required by the spec for createBuffer() and the deprecated
// createScriptProcessor() and matches what is used by Blink and Gecko.
// The limit protects against large memory allocations.
/// Upper bound (inclusive) on the channel count accepted by AudioNodes.
pub(crate) const MAX_CHANNEL_COUNT: u32 = 32;
36
/// <https://webaudio.github.io/web-audio-api/#audionode>
#[dom_struct]
pub(crate) struct AudioNode {
    eventtarget: EventTarget,
    /// Id of the corresponding node in the servo-media audio graph.
    /// `None` when the backend failed to create the node, in which case this
    /// DOM node is "inert": it accepts API calls but performs no audio work.
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    node_id: Option<NodeId>,
    /// The context this node was created by and belongs to.
    context: Dom<BaseAudioContext>,
    // Fixed at construction time; exposed via NumberOfInputs()/NumberOfOutputs().
    number_of_inputs: u32,
    number_of_outputs: u32,
    // Mutable channel configuration, settable through the AudioNode IDL
    // attributes (subject to per-node-type constraints).
    channel_count: Cell<u32>,
    channel_count_mode: Cell<ChannelCountMode>,
    channel_interpretation: Cell<ChannelInterpretation>,
}
50
impl AudioNode {
    /// Creates the servo-media backend node for `node_type` and wraps it in a
    /// new (not yet reflected) `AudioNode`.
    ///
    /// Returns `Error::NotSupported` if the requested channel count is 0 or
    /// exceeds [`MAX_CHANNEL_COUNT`]. If the backend fails to create the
    /// node, an inert `AudioNode` (`node_id == None`) is returned instead of
    /// an error, matching Chromium and Gecko behavior.
    pub(crate) fn new_inherited(
        node_type: AudioNodeInit,
        context: &BaseAudioContext,
        options: UnwrappedAudioNodeOptions,
        number_of_inputs: u32,
        number_of_outputs: u32,
    ) -> Fallible<AudioNode> {
        if options.count == 0 || options.count > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }
        // Channel configuration handed to servo-media when creating the
        // backend node.
        let ch = ChannelInfo {
            count: options.count as u8,
            mode: options.mode.convert(),
            interpretation: options.interpretation.convert(),
            context_channel_count: context.channel_count() as u8,
        };
        let node_id = context
            .audio_context_impl()
            .lock()
            .unwrap()
            .create_node(node_type, ch);

        if node_id.is_none() {
            // Following Chromium and Gecko, we just warn and create an inert
            // AudioNode rather than failing the constructor.
            const MESSAGE: &str =
                "Failed to create an AudioNode backend. The constructed AudioNode will be inert.";
            warn!("{MESSAGE}");
            Console::internal_warn(&context.global(), DOMString::from(MESSAGE));
        }

        Ok(AudioNode::new_inherited_for_id(
            node_id,
            context,
            options,
            number_of_inputs,
            number_of_outputs,
        ))
    }

    /// Builds the DOM-side state for an already-created backend node
    /// (`node_id` is `None` for an inert node). Performs no validation;
    /// callers are expected to have validated `options` already.
    pub(crate) fn new_inherited_for_id(
        node_id: Option<NodeId>,
        context: &BaseAudioContext,
        options: UnwrappedAudioNodeOptions,
        number_of_inputs: u32,
        number_of_outputs: u32,
    ) -> AudioNode {
        AudioNode {
            eventtarget: EventTarget::new_inherited(),
            node_id,
            context: Dom::from_ref(context),
            number_of_inputs,
            number_of_outputs,
            channel_count: Cell::new(options.count),
            channel_count_mode: Cell::new(options.mode),
            channel_interpretation: Cell::new(options.interpretation),
        }
    }

    /// Forwards `message` to the backend node, if one exists; inert nodes
    /// silently drop messages.
    pub(crate) fn message(&self, message: AudioNodeMessage) {
        if let Some(node_id) = self.node_id {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .message_node(node_id, message);
        }
    }

    /// Id of this node in the servo-media audio graph, or `None` if the node
    /// is inert (backend creation failed).
    pub(crate) fn node_id(&self) -> Option<NodeId> {
        self.node_id
    }
}
124
impl AudioNodeMethods<crate::DomTypeHolder> for AudioNode {
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect>
    fn Connect(
        &self,
        destination: &AudioNode,
        output: u32,
        input: u32,
    ) -> Fallible<DomRoot<AudioNode>> {
        // Nodes belonging to different contexts cannot be connected.
        if self.context != destination.context {
            return Err(Error::InvalidAccess(None));
        }

        // Validate the output index on `self` and the input index on
        // `destination`.
        if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // Inert nodes (no backend id on either end) accept the call but make
        // no graph change.
        let Some(source_id) = self.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };
        let Some(dest_id) = destination.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(source_id.output(output), dest_id.input(input));

        // The destination node is returned so that calls can be chained.
        Ok(DomRoot::from_ref(destination))
    }

    /// Connects an output of this node to an `AudioParam`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect-destinationparam-output>
    fn Connect_(&self, dest: &AudioParam, output: u32) -> Fallible<()> {
        // The param must belong to a node of the same context.
        if self.context != dest.context() {
            return Err(Error::InvalidAccess(None));
        }

        if output >= self.NumberOfOutputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // Inert endpoints: succeed without touching the graph.
        let Some(source_id) = self.node_id() else {
            return Ok(());
        };
        let Some(param_node) = dest.node_id() else {
            return Ok(());
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(
                source_id.output(output),
                param_node.param(dest.param_type()),
            );

        Ok(())
    }

    /// Disconnects all outgoing connections of this node.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect(&self) -> ErrorResult {
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_all_from(node_id);
        }
        Ok(())
    }

    /// Disconnects everything attached to a single output of this node.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output>
    fn Disconnect_(&self, out: u32) -> ErrorResult {
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output(node_id.output(out));
        }
        Ok(())
    }

    /// Disconnects all connections from this node to `to`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode>
    fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_between(from_node, to_node);
        }
        Ok(())
    }

    /// Disconnects connections from a specific output of this node to `to`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output>
    fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between(from_node.output(out), to_node);
        }
        Ok(())
    }

    /// Disconnects a specific output of this node from a specific input of
    /// `to`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input>
    fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(from_node.output(out), to_node.input(inp));
        }
        Ok(())
    }

    /// Disconnects all connections from this node to `param`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect_____(&self, param: &AudioParam) -> ErrorResult {
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_to(from_node, param_node.param(param.param_type()));
        }
        Ok(())
    }

    /// Disconnects a specific output of this node from `param`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect______(&self, param: &AudioParam, out: u32) -> ErrorResult {
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(
                    from_node.output(out),
                    param_node.param(param.param_type()),
                );
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-context>
    fn Context(&self) -> DomRoot<BaseAudioContext> {
        DomRoot::from_ref(&self.context)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs>
    fn NumberOfInputs(&self) -> u32 {
        self.number_of_inputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs>
    fn NumberOfOutputs(&self) -> u32 {
        self.number_of_outputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn ChannelCount(&self) -> u32 {
        self.channel_count.get()
    }

    /// Setter for `channelCount`. Per-node-type constraints are checked
    /// first (and may yield InvalidState/IndexSize/NotSupported), then the
    /// generic 1..=MAX_CHANNEL_COUNT range check; the error-variant ordering
    /// here is observable by script, so keep it.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn SetChannelCount(&self, value: u32) -> ErrorResult {
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    // The destination of an OfflineAudioContext has a fixed
                    // channel count.
                    return Err(Error::InvalidState(None));
                } else if !(1..=MAX_CHANNEL_COUNT).contains(&value) {
                    return Err(Error::IndexSize(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                if value > 2 {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                if value > 2 {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        if value == 0 || value > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }

        // Update the DOM-side value and mirror it to the backend node.
        self.channel_count.set(value);
        self.message(AudioNodeMessage::SetChannelCount(value as u8));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn ChannelCountMode(&self) -> ChannelCountMode {
        self.channel_count_mode.get()
    }

    /// Setter for `channelCountMode`, with the same per-node-type constraint
    /// structure as `SetChannelCount`.
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn SetChannelCountMode(&self, value: ChannelCountMode) -> ErrorResult {
        // Channel count mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    return Err(Error::InvalidState(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        self.channel_count_mode.set(value);
        self.message(AudioNodeMessage::SetChannelMode(value.convert()));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn ChannelInterpretation(&self) -> ChannelInterpretation {
        self.channel_interpretation.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn SetChannelInterpretation(&self, value: ChannelInterpretation) -> ErrorResult {
        // Channel interpretation mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // ChannelSplitterNode's interpretation is fixed.
        if let EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) =
            self.upcast::<EventTarget>().type_id()
        {
            return Err(Error::InvalidState(None));
        };

        self.channel_interpretation.set(value);
        self.message(AudioNodeMessage::SetChannelInterpretation(value.convert()));
        Ok(())
    }
}
410
411impl Convert<ServoMediaChannelCountMode> for ChannelCountMode {
412    fn convert(self) -> ServoMediaChannelCountMode {
413        match self {
414            ChannelCountMode::Max => ServoMediaChannelCountMode::Max,
415            ChannelCountMode::Clamped_max => ServoMediaChannelCountMode::ClampedMax,
416            ChannelCountMode::Explicit => ServoMediaChannelCountMode::Explicit,
417        }
418    }
419}
420
421impl Convert<ServoMediaChannelInterpretation> for ChannelInterpretation {
422    fn convert(self) -> ServoMediaChannelInterpretation {
423        match self {
424            ChannelInterpretation::Discrete => ServoMediaChannelInterpretation::Discrete,
425            ChannelInterpretation::Speakers => ServoMediaChannelInterpretation::Speakers,
426        }
427    }
428}
429
/// Helper for resolving the optional members of the generated
/// `AudioNodeOptions` dictionary against per-node-type defaults.
pub(crate) trait AudioNodeOptionsHelper {
    /// Returns an `UnwrappedAudioNodeOptions` where each member is either
    /// the value supplied in the dictionary or the given default.
    fn unwrap_or(
        &self,
        count: u32,
        mode: ChannelCountMode,
        interpretation: ChannelInterpretation,
    ) -> UnwrappedAudioNodeOptions;
}
438
439impl AudioNodeOptionsHelper for AudioNodeOptions {
440    fn unwrap_or(
441        &self,
442        count: u32,
443        mode: ChannelCountMode,
444        interpretation: ChannelInterpretation,
445    ) -> UnwrappedAudioNodeOptions {
446        UnwrappedAudioNodeOptions {
447            count: self.channelCount.unwrap_or(count),
448            mode: self.channelCountMode.unwrap_or(mode),
449            interpretation: self.channelInterpretation.unwrap_or(interpretation),
450        }
451    }
452}
453
/// Each node has a set of defaults, so this lets us work with them
/// easily without having to deal with the Options
pub(crate) struct UnwrappedAudioNodeOptions {
    /// Resolved channelCount.
    pub(crate) count: u32,
    /// Resolved channelCountMode.
    pub(crate) mode: ChannelCountMode,
    /// Resolved channelInterpretation.
    pub(crate) interpretation: ChannelInterpretation,
}
461
462impl Default for UnwrappedAudioNodeOptions {
463    fn default() -> Self {
464        UnwrappedAudioNodeOptions {
465            count: 2,
466            mode: ChannelCountMode::Max,
467            interpretation: ChannelInterpretation::Speakers,
468        }
469    }
470}