script/dom/audio/baseaudiocontext.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::rc::Rc;
use std::sync::{Arc, Mutex};

use base::id::PipelineId;
use dom_struct::dom_struct;
use js::rust::CustomAutoRooterGuard;
use js::typedarray::ArrayBuffer;
use servo_media::audio::context::{
    AudioContext, AudioContextOptions, OfflineAudioContextOptions, ProcessingState,
    RealTimeAudioContextOptions,
};
use servo_media::audio::decoder::AudioDecoderCallbacks;
use servo_media::audio::graph::NodeId;
use servo_media::{ClientContextId, ServoMedia};
use uuid::Uuid;

use crate::conversions::Convert;
use crate::dom::audio::analysernode::AnalyserNode;
use crate::dom::audio::audiobuffer::AudioBuffer;
use crate::dom::audio::audiobuffersourcenode::AudioBufferSourceNode;
use crate::dom::audio::audiodestinationnode::AudioDestinationNode;
use crate::dom::audio::audiolistener::AudioListener;
use crate::dom::audio::audionode::MAX_CHANNEL_COUNT;
use crate::dom::audio::biquadfilternode::BiquadFilterNode;
use crate::dom::audio::channelmergernode::ChannelMergerNode;
use crate::dom::audio::channelsplitternode::ChannelSplitterNode;
use crate::dom::audio::constantsourcenode::ConstantSourceNode;
use crate::dom::audio::gainnode::GainNode;
use crate::dom::audio::iirfilternode::IIRFilterNode;
use crate::dom::audio::oscillatornode::OscillatorNode;
use crate::dom::audio::pannernode::PannerNode;
use crate::dom::audio::stereopannernode::StereoPannerNode;
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::AnalyserNodeBinding::AnalyserOptions;
use crate::dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding::AudioBufferSourceOptions;
use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
    AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
};
use crate::dom::bindings::codegen::Bindings::BaseAudioContextBinding::{
    AudioContextState, BaseAudioContextMethods, DecodeErrorCallback, DecodeSuccessCallback,
};
use crate::dom::bindings::codegen::Bindings::BiquadFilterNodeBinding::BiquadFilterOptions;
use crate::dom::bindings::codegen::Bindings::ChannelMergerNodeBinding::ChannelMergerOptions;
use crate::dom::bindings::codegen::Bindings::ChannelSplitterNodeBinding::ChannelSplitterOptions;
use crate::dom::bindings::codegen::Bindings::ConstantSourceNodeBinding::ConstantSourceOptions;
use crate::dom::bindings::codegen::Bindings::GainNodeBinding::GainOptions;
use crate::dom::bindings::codegen::Bindings::IIRFilterNodeBinding::IIRFilterOptions;
use crate::dom::bindings::codegen::Bindings::OscillatorNodeBinding::OscillatorOptions;
use crate::dom::bindings::codegen::Bindings::PannerNodeBinding::PannerOptions;
use crate::dom::bindings::codegen::Bindings::StereoPannerNodeBinding::StereoPannerOptions;
use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::refcounted::Trusted;
use crate::dom::bindings::reflector::DomGlobal;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::domexception::{DOMErrorName, DOMException};
use crate::dom::eventtarget::EventTarget;
use crate::dom::promise::Promise;
use crate::realms::InRealm;
use crate::script_runtime::CanGc;

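/// Construction options for the two context flavours: a real-time
/// `AudioContext` that renders to an audio device, and an
/// `OfflineAudioContext` that renders into a buffer.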
#[allow(dead_code)]
pub(crate) enum BaseAudioContextOptions {
    AudioContext(RealTimeAudioContextOptions),
    OfflineAudioContext(OfflineAudioContextOptions),
}

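/// Bookkeeping for one in-flight `decodeAudioData()` request: the promise
/// to settle plus the optional success and error callbacks.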
#[derive(JSTraceable, MallocSizeOf)]
struct DecodeResolver {
    #[conditional_malloc_size_of]
    pub(crate) promise: Rc<Promise>,
    #[conditional_malloc_size_of]
    pub(crate) success_callback: Option<Rc<DecodeSuccessCallback>>,
    #[conditional_malloc_size_of]
    pub(crate) error_callback: Option<Rc<DecodeErrorCallback>>,
}

type BoxedSliceOfPromises = Box<[Rc<Promise>]>;

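/// Shared state and behaviour backing both `AudioContext` and
/// `OfflineAudioContext`.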
#[dom_struct]
pub(crate) struct BaseAudioContext {
    eventtarget: EventTarget,
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    audio_context_impl: Arc<Mutex<AudioContext>>,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination>
    destination: MutNullableDom<AudioDestinationNode>,
    listener: MutNullableDom<AudioListener>,
    /// Resume promises which are soon to be fulfilled by a queued task.
    #[conditional_malloc_size_of]
    in_flight_resume_promises_queue: DomRefCell<VecDeque<(BoxedSliceOfPromises, ErrorResult)>>,
    /// <https://webaudio.github.io/web-audio-api/#pendingresumepromises>
    #[conditional_malloc_size_of]
    pending_resume_promises: DomRefCell<Vec<Rc<Promise>>>,
    decode_resolvers: DomRefCell<HashMap<String, DecodeResolver>>,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate>
    sample_rate: f32,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state>
    /// Although servo-media already keeps track of the control thread state,
    /// we keep a state flag here as well, so that we can throw synchronously
    /// when an operation is attempted on a context that has just been closed
    /// with `close()`.
    state: Cell<AudioContextState>,
    channel_count: u32,
}

impl BaseAudioContext {
    #[cfg_attr(crown, allow(crown::unrooted_must_root))]
    pub(crate) fn new_inherited(
        options: BaseAudioContextOptions,
        pipeline_id: PipelineId,
    ) -> Fallible<BaseAudioContext> {
        let (sample_rate, channel_count) = match options {
            BaseAudioContextOptions::AudioContext(ref opt) => (opt.sample_rate, 2),
            BaseAudioContextOptions::OfflineAudioContext(ref opt) => {
                (opt.sample_rate, opt.channels)
            },
        };

        let client_context_id =
            ClientContextId::build(pipeline_id.namespace_id.0, pipeline_id.index.0.get());
        let audio_context_impl = ServoMedia::get()
            .create_audio_context(&client_context_id, options.convert())
            .map_err(|_| Error::NotSupported)?;

        Ok(BaseAudioContext {
            eventtarget: EventTarget::new_inherited(),
            audio_context_impl,
            destination: Default::default(),
            listener: Default::default(),
            in_flight_resume_promises_queue: Default::default(),
            pending_resume_promises: Default::default(),
            decode_resolvers: Default::default(),
            sample_rate,
            state: Cell::new(AudioContextState::Suspended),
            channel_count: channel_count.into(),
        })
    }

    /// Tells whether this is an `OfflineAudioContext` or not; this base
    /// implementation always answers false.
    pub(crate) fn is_offline(&self) -> bool {
        false
    }

    pub(crate) fn audio_context_impl(&self) -> Arc<Mutex<AudioContext>> {
        self.audio_context_impl.clone()
    }

    pub(crate) fn destination_node(&self) -> NodeId {
        self.audio_context_impl.lock().unwrap().dest_node()
    }

    pub(crate) fn listener(&self) -> NodeId {
        self.audio_context_impl.lock().unwrap().listener()
    }

    // https://webaudio.github.io/web-audio-api/#allowed-to-start
    pub(crate) fn is_allowed_to_start(&self) -> bool {
        self.state.get() == AudioContextState::Suspended
    }

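    /// Adds a promise to the pending resume promises tracked by this context.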
    fn push_pending_resume_promise(&self, promise: &Rc<Promise>) {
        self.pending_resume_promises
            .borrow_mut()
            .push(promise.clone());
    }

    /// Takes the pending resume promises.
    ///
    /// The result with which these promises will be fulfilled is passed here
    /// and this method returns nothing because we actually just move the
    /// current list of pending resume promises to the
    /// `in_flight_resume_promises_queue` field.
    ///
    /// Each call to this method must be followed by a call to
    /// `fulfill_in_flight_resume_promises`, to actually fulfill the promises
    /// which were taken and moved to the in-flight queue.
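    ///
    /// In practice, `resume()` calls this with the rendering thread's result
    /// and then queues a DOM task that calls
    /// `fulfill_in_flight_resume_promises` to settle the snapshot taken here.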
    fn take_pending_resume_promises(&self, result: ErrorResult) {
        let pending_resume_promises =
            std::mem::take(&mut *self.pending_resume_promises.borrow_mut());
        self.in_flight_resume_promises_queue
            .borrow_mut()
            .push_back((pending_resume_promises.into(), result));
    }

    /// Fulfills the next in-flight resume promises queue after running a closure.
    ///
    /// See the comment on `take_pending_resume_promises` for why this method
    /// does not take a list of promises to fulfill. Callers cannot just pop
    /// the front list off of `in_flight_resume_promises_queue` and later fulfill
    /// the promises because that would mean putting
    /// `#[cfg_attr(crown, allow(crown::unrooted_must_root))]` on even more functions, potentially
    /// hiding actual safety bugs.
    #[cfg_attr(crown, allow(crown::unrooted_must_root))]
    fn fulfill_in_flight_resume_promises<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        let (promises, result) = self
            .in_flight_resume_promises_queue
            .borrow_mut()
            .pop_front()
            .expect("there should be at least one list of in flight resume promises");
        f();
        for promise in &*promises {
            match result {
                Ok(ref value) => promise.resolve_native(value, CanGc::note()),
                Err(ref error) => promise.reject_error(error.clone(), CanGc::note()),
            }
        }
    }

    /// Control thread processing state
    pub(crate) fn control_thread_state(&self) -> ProcessingState {
        self.audio_context_impl.lock().unwrap().state()
    }

    /// Set audio context state
    pub(crate) fn set_state_attribute(&self, state: AudioContextState) {
        self.state.set(state);
    }

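    /// Tells the rendering thread to resume playback. On success, a queued
    /// DOM task settles the pending resume promises and fires `statechange`
    /// if the state actually changed; on failure, the promises are rejected.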
    pub(crate) fn resume(&self) {
        let this = Trusted::new(self);
        // Set the rendering thread state to 'running' and start
        // rendering the audio graph.
        match self.audio_context_impl.lock().unwrap().resume() {
            Ok(()) => {
                self.take_pending_resume_promises(Ok(()));
                self.global().task_manager().dom_manipulation_task_source().queue(
                    task!(resume_success: move || {
                        let this = this.root();
                        this.fulfill_in_flight_resume_promises(|| {
                            if this.state.get() != AudioContextState::Running {
                                this.state.set(AudioContextState::Running);
                                this.global().task_manager().dom_manipulation_task_source().queue_simple_event(
                                    this.upcast(),
                                    atom!("statechange"),
                                );
                            }
                        });
                    })
                );
            },
            Err(()) => {
                self.take_pending_resume_promises(Err(Error::Type(
                    "Could not resume the audio context".to_owned(),
                )));
                self.global()
                    .task_manager()
                    .dom_manipulation_task_source()
                    .queue(task!(resume_error: move || {
                        this.root().fulfill_in_flight_resume_promises(|| {})
                    }));
            },
        }
    }

    pub(crate) fn channel_count(&self) -> u32 {
        self.channel_count
    }
}

impl BaseAudioContextMethods<crate::DomTypeHolder> for BaseAudioContext {
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate>
    fn SampleRate(&self) -> Finite<f32> {
        Finite::wrap(self.sample_rate)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-currenttime>
    fn CurrentTime(&self) -> Finite<f64> {
        let current_time = self.audio_context_impl.lock().unwrap().current_time();
        Finite::wrap(current_time)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state>
    fn State(&self) -> AudioContextState {
        self.state.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-resume>
    fn Resume(&self, comp: InRealm, can_gc: CanGc) -> Rc<Promise> {
        // Step 1.
        let promise = Promise::new_in_current_realm(comp, can_gc);

        // Step 2.
        if self.audio_context_impl.lock().unwrap().state() == ProcessingState::Closed {
            promise.reject_error(Error::InvalidState(None), can_gc);
            return promise;
        }

        // Step 3.
        if self.state.get() == AudioContextState::Running {
            promise.resolve_native(&(), can_gc);
            return promise;
        }

        self.push_pending_resume_promise(&promise);

        // Step 4.
        if !self.is_allowed_to_start() {
            return promise;
        }

        // Steps 5 and 6.
        self.resume();

        // Step 7.
        promise
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination>
    fn Destination(&self, can_gc: CanGc) -> DomRoot<AudioDestinationNode> {
        let global = self.global();
        self.destination.or_init(|| {
            let mut options = AudioNodeOptions::empty();
            options.channelCount = Some(self.channel_count);
            options.channelCountMode = Some(ChannelCountMode::Explicit);
            options.channelInterpretation = Some(ChannelInterpretation::Speakers);
            AudioDestinationNode::new(&global, self, &options, can_gc)
        })
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-listener>
    fn Listener(&self, can_gc: CanGc) -> DomRoot<AudioListener> {
        let global = self.global();
        let window = global.as_window();
        self.listener
            .or_init(|| AudioListener::new(window, self, can_gc))
    }

    // https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-onstatechange
    event_handler!(statechange, GetOnstatechange, SetOnstatechange);

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createoscillator>
    fn CreateOscillator(&self, can_gc: CanGc) -> Fallible<DomRoot<OscillatorNode>> {
        OscillatorNode::new(
            self.global().as_window(),
            self,
            &OscillatorOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-creategain>
    fn CreateGain(&self, can_gc: CanGc) -> Fallible<DomRoot<GainNode>> {
        GainNode::new(
            self.global().as_window(),
            self,
            &GainOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createpanner>
    fn CreatePanner(&self, can_gc: CanGc) -> Fallible<DomRoot<PannerNode>> {
        PannerNode::new(
            self.global().as_window(),
            self,
            &PannerOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createanalyser>
    fn CreateAnalyser(&self, can_gc: CanGc) -> Fallible<DomRoot<AnalyserNode>> {
        AnalyserNode::new(
            self.global().as_window(),
            self,
            &AnalyserOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbiquadfilter>
    fn CreateBiquadFilter(&self, can_gc: CanGc) -> Fallible<DomRoot<BiquadFilterNode>> {
        BiquadFilterNode::new(
            self.global().as_window(),
            self,
            &BiquadFilterOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createstereopanner>
    fn CreateStereoPanner(&self, can_gc: CanGc) -> Fallible<DomRoot<StereoPannerNode>> {
        StereoPannerNode::new(
            self.global().as_window(),
            self,
            &StereoPannerOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createconstantsource>
    fn CreateConstantSource(&self, can_gc: CanGc) -> Fallible<DomRoot<ConstantSourceNode>> {
        ConstantSourceNode::new(
            self.global().as_window(),
            self,
            &ConstantSourceOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createchannelmerger>
    fn CreateChannelMerger(
        &self,
        count: u32,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<ChannelMergerNode>> {
        let mut opts = ChannelMergerOptions::empty();
        opts.numberOfInputs = count;
        ChannelMergerNode::new(self.global().as_window(), self, &opts, can_gc)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createchannelsplitter>
    fn CreateChannelSplitter(
        &self,
        count: u32,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<ChannelSplitterNode>> {
        let mut opts = ChannelSplitterOptions::empty();
        opts.numberOfOutputs = count;
        ChannelSplitterNode::new(self.global().as_window(), self, &opts, can_gc)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer>
    fn CreateBuffer(
        &self,
        number_of_channels: u32,
        length: u32,
        sample_rate: Finite<f32>,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<AudioBuffer>> {
        if number_of_channels == 0 ||
            number_of_channels > MAX_CHANNEL_COUNT ||
            length == 0 ||
            *sample_rate <= 0.
        {
            return Err(Error::NotSupported);
        }
        Ok(AudioBuffer::new(
            self.global().as_window(),
            number_of_channels,
            length,
            *sample_rate,
            None,
            can_gc,
        ))
    }

    // https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffersource
    fn CreateBufferSource(&self, can_gc: CanGc) -> Fallible<DomRoot<AudioBufferSourceNode>> {
        AudioBufferSourceNode::new(
            self.global().as_window(),
            self,
            &AudioBufferSourceOptions::empty(),
            can_gc,
        )
    }

    // https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-decodeaudiodata
    fn DecodeAudioData(
        &self,
        audio_data: CustomAutoRooterGuard<ArrayBuffer>,
        decode_success_callback: Option<Rc<DecodeSuccessCallback>>,
        decode_error_callback: Option<Rc<DecodeErrorCallback>>,
        comp: InRealm,
        can_gc: CanGc,
    ) -> Rc<Promise> {
        // Step 1.
        let promise = Promise::new_in_current_realm(comp, can_gc);

        if audio_data.len() > 0 {
            // Step 2.
            // XXX detach array buffer.
            let uuid = Uuid::new_v4().simple().to_string();
            let uuid_ = uuid.clone();
            self.decode_resolvers.borrow_mut().insert(
                uuid.clone(),
                DecodeResolver {
                    promise: promise.clone(),
                    success_callback: decode_success_callback,
                    error_callback: decode_error_callback,
                },
            );
            let audio_data = audio_data.to_vec();
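            // The decoded samples accumulate per channel; one clone of this
            // handle goes to each decoder callback below (`ready` resizes,
            // `progress` appends, `eos` reads the result).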
            let decoded_audio = Arc::new(Mutex::new(Vec::new()));
            let decoded_audio_ = decoded_audio.clone();
            let decoded_audio__ = decoded_audio.clone();
            // servo-media passes an audio channel position bitmask to the
            // progress callback along with each buffer; that position may
            // not match the index of the channel in the decoded_audio Vec.
            let channels = Arc::new(Mutex::new(HashMap::new()));
            let this = Trusted::new(self);
            let this_ = this.clone();
            let task_source = self
                .global()
                .task_manager()
                .dom_manipulation_task_source()
                .to_sendable();
            let task_source_clone = task_source.clone();
            let callbacks = AudioDecoderCallbacks::new()
                .ready(move |channel_count| {
                    decoded_audio
                        .lock()
                        .unwrap()
                        .resize(channel_count as usize, Vec::new());
                })
                .progress(move |buffer, channel_pos_mask| {
                    let mut decoded_audio = decoded_audio_.lock().unwrap();
                    let mut channels = channels.lock().unwrap();
                    let channel = match channels.entry(channel_pos_mask) {
                        Entry::Occupied(entry) => *entry.get(),
                        Entry::Vacant(entry) => {
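                            // The mask is assumed to have a single bit set,
                            // so log2 recovers the channel index (e.g. a
                            // mask of 0b100 maps to channel 2).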
                            let x = (channel_pos_mask as f32).log2() as usize;
                            *entry.insert(x)
                        },
                    };
                    decoded_audio[channel].extend_from_slice((*buffer).as_ref());
                })
                .eos(move || {
                    task_source.queue(task!(audio_decode_eos: move || {
                        let this = this.root();
                        let decoded_audio = decoded_audio__.lock().unwrap();
                        let length = if !decoded_audio.is_empty() {
                            decoded_audio[0].len()
                        } else {
                            0
                        };
                        let buffer = AudioBuffer::new(
                            this.global().as_window(),
                            decoded_audio.len() as u32 /* number of channels */,
                            length as u32,
                            this.sample_rate,
                            Some(decoded_audio.as_slice()),
                            CanGc::note());
                        let mut resolvers = this.decode_resolvers.borrow_mut();
                        assert!(resolvers.contains_key(&uuid_));
                        let resolver = resolvers.remove(&uuid_).unwrap();
                        if let Some(callback) = resolver.success_callback {
                            let _ = callback.Call__(&buffer, ExceptionHandling::Report, CanGc::note());
                        }
                        resolver.promise.resolve_native(&buffer, CanGc::note());
                    }));
                })
                .error(move |error| {
                    task_source_clone.queue(task!(audio_decode_error: move || {
                        let this = this_.root();
                        let mut resolvers = this.decode_resolvers.borrow_mut();
                        assert!(resolvers.contains_key(&uuid));
                        let resolver = resolvers.remove(&uuid).unwrap();
                        if let Some(callback) = resolver.error_callback {
                            let _ = callback.Call__(
                                &DOMException::new(&this.global(), DOMErrorName::DataCloneError, CanGc::note()),
                                ExceptionHandling::Report, CanGc::note());
                        }
                        let error = format!("Audio decode error {:?}", error);
                        resolver.promise.reject_error(Error::Type(error), CanGc::note());
                    }));
                })
                .build();
            self.audio_context_impl
                .lock()
                .unwrap()
                .decode_audio_data(audio_data, callbacks);
        } else {
            // Step 3.
            promise.reject_error(Error::DataClone(None), can_gc);
            return promise;
        }

        // Step 4.
        promise
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createiirfilter>
    fn CreateIIRFilter(
        &self,
        feedforward: Vec<Finite<f64>>,
        feedback: Vec<Finite<f64>>,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<IIRFilterNode>> {
        let opts = IIRFilterOptions {
            parent: AudioNodeOptions::empty(),
            feedback,
            feedforward,
        };
        IIRFilterNode::new(self.global().as_window(), self, &opts, can_gc)
    }
}

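// Maps the DOM-level construction options onto servo-media's context options.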
impl Convert<AudioContextOptions> for BaseAudioContextOptions {
    fn convert(self) -> AudioContextOptions {
        match self {
            BaseAudioContextOptions::AudioContext(options) => {
                AudioContextOptions::RealTimeAudioContext(options)
            },
            BaseAudioContextOptions::OfflineAudioContext(options) => {
                AudioContextOptions::OfflineAudioContext(options)
            },
        }
    }
}

impl Convert<AudioContextState> for ProcessingState {
    fn convert(self) -> AudioContextState {
        match self {
            ProcessingState::Suspended => AudioContextState::Suspended,
            ProcessingState::Running => AudioContextState::Running,
            ProcessingState::Closed => AudioContextState::Closed,
        }
    }
}