script/dom/audio/
audionode.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5use std::cell::Cell;
6
7use dom_struct::dom_struct;
8use log::warn;
9use script_bindings::codegen::InheritTypes::{
10    AudioNodeTypeId, AudioScheduledSourceNodeTypeId, EventTargetTypeId,
11};
12use servo_media::audio::graph::NodeId;
13use servo_media::audio::node::{
14    AudioNodeInit, AudioNodeMessage, ChannelCountMode as ServoMediaChannelCountMode, ChannelInfo,
15    ChannelInterpretation as ServoMediaChannelInterpretation,
16};
17
18use crate::conversions::Convert;
19use crate::dom::audio::audioparam::AudioParam;
20use crate::dom::audio::baseaudiocontext::BaseAudioContext;
21use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
22    AudioNodeMethods, AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
23};
24use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
25use crate::dom::bindings::inheritance::Castable;
26use crate::dom::bindings::reflector::DomGlobal;
27use crate::dom::bindings::root::{Dom, DomRoot};
28use crate::dom::console::Console;
29use crate::dom::eventtarget::EventTarget;
30
31// 32 is the minimum required by the spec for createBuffer() and the deprecated
32// createScriptProcessor() and matches what is used by Blink and Gecko.
33// The limit protects against large memory allocations.
34pub(crate) const MAX_CHANNEL_COUNT: u32 = 32;
35
#[dom_struct]
pub(crate) struct AudioNode {
    eventtarget: EventTarget,
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    /// Identifier of the corresponding node in the servo-media audio graph.
    /// `None` when the backend failed to create the node, in which case this
    /// `AudioNode` is inert: messages and connections become no-ops.
    node_id: Option<NodeId>,
    /// The context this node belongs to; nodes from different contexts
    /// cannot be connected to each other.
    context: Dom<BaseAudioContext>,
    number_of_inputs: u32,
    number_of_outputs: u32,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    channel_count: Cell<u32>,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    channel_count_mode: Cell<ChannelCountMode>,
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    channel_interpretation: Cell<ChannelInterpretation>,
}
49
impl AudioNode {
    /// Creates the backend graph node and wraps it in a new `AudioNode`.
    ///
    /// Returns `Err(NotSupported)` when the requested channel count is 0 or
    /// greater than [`MAX_CHANNEL_COUNT`]. If the servo-media backend fails
    /// to create the node, a warning is logged and an inert node (one with
    /// no `node_id`) is constructed instead of returning an error.
    pub(crate) fn new_inherited(
        node_type: AudioNodeInit,
        context: &BaseAudioContext,
        options: UnwrappedAudioNodeOptions,
        number_of_inputs: u32,
        number_of_outputs: u32,
    ) -> Fallible<AudioNode> {
        if options.count == 0 || options.count > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }
        let ch = ChannelInfo {
            count: options.count as u8,
            mode: options.mode.convert(),
            interpretation: options.interpretation.convert(),
            context_channel_count: context.channel_count() as u8,
        };
        let node_id = context
            .audio_context_impl()
            .lock()
            .unwrap()
            .create_node(node_type, ch);

        if node_id.is_none() {
            // Following Chromium and Gecko, we just warn and create an inert AudioNode.
            const MESSAGE: &str =
                "Failed to create an AudioNode backend. The constructed AudioNode will be inert.";
            warn!("{MESSAGE}");
            Console::internal_warn(&context.global(), MESSAGE.to_string());
        }

        Ok(AudioNode::new_inherited_for_id(
            node_id,
            context,
            options,
            number_of_inputs,
            number_of_outputs,
        ))
    }

    /// Constructs an `AudioNode` for an already-created (or absent, i.e.
    /// inert) backend node id. Performs no validation of `options`.
    pub(crate) fn new_inherited_for_id(
        node_id: Option<NodeId>,
        context: &BaseAudioContext,
        options: UnwrappedAudioNodeOptions,
        number_of_inputs: u32,
        number_of_outputs: u32,
    ) -> AudioNode {
        AudioNode {
            eventtarget: EventTarget::new_inherited(),
            node_id,
            context: Dom::from_ref(context),
            number_of_inputs,
            number_of_outputs,
            channel_count: Cell::new(options.count),
            channel_count_mode: Cell::new(options.mode),
            channel_interpretation: Cell::new(options.interpretation),
        }
    }

    /// Forwards `message` to the backend node; silently does nothing for an
    /// inert node (no backend id).
    pub(crate) fn message(&self, message: AudioNodeMessage) {
        if let Some(node_id) = self.node_id {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .message_node(node_id, message);
        }
    }

    /// Returns the backend node id, or `None` for an inert node.
    pub(crate) fn node_id(&self) -> Option<NodeId> {
        self.node_id
    }
}
123
impl AudioNodeMethods<crate::DomTypeHolder> for AudioNode {
    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect>
    fn Connect(
        &self,
        destination: &AudioNode,
        output: u32,
        input: u32,
    ) -> Fallible<DomRoot<AudioNode>> {
        // Connecting nodes across different contexts is not allowed.
        if self.context != destination.context {
            return Err(Error::InvalidAccess(None));
        }

        if output >= self.NumberOfOutputs() || input >= destination.NumberOfInputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // Inert nodes (no backend id) silently succeed without connecting.
        let Some(source_id) = self.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };
        let Some(dest_id) = destination.node_id() else {
            return Ok(DomRoot::from_ref(destination));
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(source_id.output(output), dest_id.input(input));

        // Per spec, connect() returns the destination node to allow chaining.
        Ok(DomRoot::from_ref(destination))
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-connect-destinationparam-output>
    fn Connect_(&self, dest: &AudioParam, output: u32) -> Fallible<()> {
        // The AudioParam must belong to a node of the same context.
        if self.context != dest.context() {
            return Err(Error::InvalidAccess(None));
        }

        if output >= self.NumberOfOutputs() {
            return Err(Error::IndexSize(None));
        }

        // servo-media takes care of ignoring duplicated connections.

        // Inert endpoints silently succeed without connecting.
        let Some(source_id) = self.node_id() else {
            return Ok(());
        };
        let Some(param_node) = dest.node_id() else {
            return Ok(());
        };

        self.context
            .audio_context_impl()
            .lock()
            .unwrap()
            .connect_ports(
                source_id.output(output),
                param_node.param(dest.param_type()),
            );

        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect(&self) -> ErrorResult {
        // Disconnecting an inert node is a no-op.
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_all_from(node_id);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-output>
    fn Disconnect_(&self, out: u32) -> ErrorResult {
        if let Some(node_id) = self.node_id() {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output(node_id.output(out));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode>
    fn Disconnect__(&self, to: &AudioNode) -> ErrorResult {
        // If either endpoint is inert there is nothing to disconnect.
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_between(from_node, to_node);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output>
    fn Disconnect___(&self, to: &AudioNode, out: u32) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between(from_node.output(out), to_node);
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect-destinationnode-output-input>
    fn Disconnect____(&self, to: &AudioNode, out: u32, inp: u32) -> ErrorResult {
        if let (Some(from_node), Some(to_node)) = (self.node_id(), to.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(from_node.output(out), to_node.input(inp));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect_____(&self, param: &AudioParam) -> ErrorResult {
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_to(from_node, param_node.param(param.param_type()));
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-disconnect>
    fn Disconnect______(&self, param: &AudioParam, out: u32) -> ErrorResult {
        if let (Some(from_node), Some(param_node)) = (self.node_id(), param.node_id()) {
            self.context
                .audio_context_impl()
                .lock()
                .unwrap()
                .disconnect_output_between_to(
                    from_node.output(out),
                    param_node.param(param.param_type()),
                );
        }
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-context>
    fn Context(&self) -> DomRoot<BaseAudioContext> {
        DomRoot::from_ref(&self.context)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofinputs>
    fn NumberOfInputs(&self) -> u32 {
        self.number_of_inputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-numberofoutputs>
    fn NumberOfOutputs(&self) -> u32 {
        self.number_of_outputs
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn ChannelCount(&self) -> u32 {
        self.channel_count.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcount>
    fn SetChannelCount(&self, value: u32) -> ErrorResult {
        // Enforce the per-node-type constraints from the spec before the
        // generic range check below.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode) => {
                if self.context.is_offline() {
                    return Err(Error::InvalidState(None));
                } else if !(1..=MAX_CHANNEL_COUNT).contains(&value) {
                    return Err(Error::IndexSize(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) if value > 2 => {
                return Err(Error::NotSupported(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) if value > 2 => {
                return Err(Error::NotSupported(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        if value == 0 || value > MAX_CHANNEL_COUNT {
            return Err(Error::NotSupported(None));
        }

        self.channel_count.set(value);
        self.message(AudioNodeMessage::SetChannelCount(value as u8));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn ChannelCountMode(&self) -> ChannelCountMode {
        self.channel_count_mode.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelcountmode>
    fn SetChannelCountMode(&self, value: ChannelCountMode) -> ErrorResult {
        // Channel count mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // Per-node-type constraints from the spec.
        match self.upcast::<EventTarget>().type_id() {
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioDestinationNode)
                if self.context.is_offline() =>
            {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::PannerNode) => {
                if value == ChannelCountMode::Max {
                    return Err(Error::NotSupported(None));
                }
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::AudioScheduledSourceNode(
                AudioScheduledSourceNodeTypeId::StereoPannerNode,
            )) if value == ChannelCountMode::Max => {
                return Err(Error::NotSupported(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelMergerNode) => {
                return Err(Error::InvalidState(None));
            },
            EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) => {
                return Err(Error::InvalidState(None));
            },
            // XXX We do not support any of the other AudioNodes with
            // constraints yet. Add more cases here as we add support
            // for new AudioNodes.
            _ => (),
        };

        self.channel_count_mode.set(value);
        self.message(AudioNodeMessage::SetChannelMode(value.convert()));
        Ok(())
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn ChannelInterpretation(&self) -> ChannelInterpretation {
        self.channel_interpretation.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-audionode-channelinterpretation>
    fn SetChannelInterpretation(&self, value: ChannelInterpretation) -> ErrorResult {
        // Channel interpretation mode has no effect for nodes with no inputs.
        if self.number_of_inputs == 0 {
            return Ok(());
        }

        // ChannelSplitterNode's interpretation is fixed by the spec.
        if let EventTargetTypeId::AudioNode(AudioNodeTypeId::ChannelSplitterNode) =
            self.upcast::<EventTarget>().type_id()
        {
            return Err(Error::InvalidState(None));
        };

        self.channel_interpretation.set(value);
        self.message(AudioNodeMessage::SetChannelInterpretation(value.convert()));
        Ok(())
    }
}
403
404impl Convert<ServoMediaChannelCountMode> for ChannelCountMode {
405    fn convert(self) -> ServoMediaChannelCountMode {
406        match self {
407            ChannelCountMode::Max => ServoMediaChannelCountMode::Max,
408            ChannelCountMode::Clamped_max => ServoMediaChannelCountMode::ClampedMax,
409            ChannelCountMode::Explicit => ServoMediaChannelCountMode::Explicit,
410        }
411    }
412}
413
414impl Convert<ServoMediaChannelInterpretation> for ChannelInterpretation {
415    fn convert(self) -> ServoMediaChannelInterpretation {
416        match self {
417            ChannelInterpretation::Discrete => ServoMediaChannelInterpretation::Discrete,
418            ChannelInterpretation::Speakers => ServoMediaChannelInterpretation::Speakers,
419        }
420    }
421}
422
/// Helper for resolving the optional fields of a WebIDL `AudioNodeOptions`
/// dictionary against per-node-type defaults.
pub(crate) trait AudioNodeOptionsHelper {
    /// Returns the options with each unset field replaced by the
    /// corresponding default supplied by the caller.
    fn unwrap_or(
        &self,
        count: u32,
        mode: ChannelCountMode,
        interpretation: ChannelInterpretation,
    ) -> UnwrappedAudioNodeOptions;
}
431
432impl AudioNodeOptionsHelper for AudioNodeOptions {
433    fn unwrap_or(
434        &self,
435        count: u32,
436        mode: ChannelCountMode,
437        interpretation: ChannelInterpretation,
438    ) -> UnwrappedAudioNodeOptions {
439        UnwrappedAudioNodeOptions {
440            count: self.channelCount.unwrap_or(count),
441            mode: self.channelCountMode.unwrap_or(mode),
442            interpretation: self.channelInterpretation.unwrap_or(interpretation),
443        }
444    }
445}
446
/// Each node has a set of defaults, so this lets us work with them
/// easily without having to deal with the Options
pub(crate) struct UnwrappedAudioNodeOptions {
    /// Resolved channelCount value.
    pub(crate) count: u32,
    /// Resolved channelCountMode value.
    pub(crate) mode: ChannelCountMode,
    /// Resolved channelInterpretation value.
    pub(crate) interpretation: ChannelInterpretation,
}
454
455impl Default for UnwrappedAudioNodeOptions {
456    fn default() -> Self {
457        UnwrappedAudioNodeOptions {
458            count: 2,
459            mode: ChannelCountMode::Max,
460            interpretation: ChannelInterpretation::Speakers,
461        }
462    }
463}