script/dom/audio/baseaudiocontext.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::rc::Rc;
use std::sync::{Arc, Mutex};

use base::id::PipelineId;
use dom_struct::dom_struct;
use js::rust::CustomAutoRooterGuard;
use js::typedarray::ArrayBuffer;
use servo_media::audio::context::{
    AudioContext, AudioContextOptions, OfflineAudioContextOptions, ProcessingState,
    RealTimeAudioContextOptions,
};
use servo_media::audio::decoder::AudioDecoderCallbacks;
use servo_media::audio::graph::NodeId;
use servo_media::{ClientContextId, ServoMedia};
use uuid::Uuid;

use crate::conversions::Convert;
use crate::dom::audio::analysernode::AnalyserNode;
use crate::dom::audio::audiobuffer::AudioBuffer;
use crate::dom::audio::audiobuffersourcenode::AudioBufferSourceNode;
use crate::dom::audio::audiodestinationnode::AudioDestinationNode;
use crate::dom::audio::audiolistener::AudioListener;
use crate::dom::audio::audionode::MAX_CHANNEL_COUNT;
use crate::dom::audio::biquadfilternode::BiquadFilterNode;
use crate::dom::audio::channelmergernode::ChannelMergerNode;
use crate::dom::audio::channelsplitternode::ChannelSplitterNode;
use crate::dom::audio::constantsourcenode::ConstantSourceNode;
use crate::dom::audio::gainnode::GainNode;
use crate::dom::audio::iirfilternode::IIRFilterNode;
use crate::dom::audio::oscillatornode::OscillatorNode;
use crate::dom::audio::pannernode::PannerNode;
use crate::dom::audio::stereopannernode::StereoPannerNode;
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::AnalyserNodeBinding::AnalyserOptions;
use crate::dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding::AudioBufferSourceOptions;
use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
    AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
};
use crate::dom::bindings::codegen::Bindings::BaseAudioContextBinding::{
    AudioContextState, BaseAudioContextMethods, DecodeErrorCallback, DecodeSuccessCallback,
};
use crate::dom::bindings::codegen::Bindings::BiquadFilterNodeBinding::BiquadFilterOptions;
use crate::dom::bindings::codegen::Bindings::ChannelMergerNodeBinding::ChannelMergerOptions;
use crate::dom::bindings::codegen::Bindings::ChannelSplitterNodeBinding::ChannelSplitterOptions;
use crate::dom::bindings::codegen::Bindings::ConstantSourceNodeBinding::ConstantSourceOptions;
use crate::dom::bindings::codegen::Bindings::GainNodeBinding::GainOptions;
use crate::dom::bindings::codegen::Bindings::IIRFilterNodeBinding::IIRFilterOptions;
use crate::dom::bindings::codegen::Bindings::OscillatorNodeBinding::OscillatorOptions;
use crate::dom::bindings::codegen::Bindings::PannerNodeBinding::PannerOptions;
use crate::dom::bindings::codegen::Bindings::StereoPannerNodeBinding::StereoPannerOptions;
use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::refcounted::Trusted;
use crate::dom::bindings::reflector::DomGlobal;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::domexception::{DOMErrorName, DOMException};
use crate::dom::eventtarget::EventTarget;
use crate::dom::promise::Promise;
use crate::realms::InRealm;
use crate::script_runtime::CanGc;

pub(crate) enum BaseAudioContextOptions {
    AudioContext(RealTimeAudioContextOptions),
    OfflineAudioContext(OfflineAudioContextOptions),
}
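
// Both variants expose the sample rate consumed by `new_inherited` below;
// the real-time variant's output channel count is fixed at 2 there, while
// the offline variant carries its own channel count. A minimal sketch of
// matching on the variants, using a hypothetical helper that is not part
// of this file:
//
//     fn sample_rate_of(options: &BaseAudioContextOptions) -> f32 {
//         match options {
//             BaseAudioContextOptions::AudioContext(o) => o.sample_rate,
//             BaseAudioContextOptions::OfflineAudioContext(o) => o.sample_rate,
//         }
//     }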

#[derive(JSTraceable, MallocSizeOf)]
struct DecodeResolver {
    #[conditional_malloc_size_of]
    pub(crate) promise: Rc<Promise>,
    #[conditional_malloc_size_of]
    pub(crate) success_callback: Option<Rc<DecodeSuccessCallback>>,
    #[conditional_malloc_size_of]
    pub(crate) error_callback: Option<Rc<DecodeErrorCallback>>,
}

type BoxedSliceOfPromises = Box<[Rc<Promise>]>;

#[dom_struct]
pub(crate) struct BaseAudioContext {
    eventtarget: EventTarget,
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    audio_context_impl: Arc<Mutex<AudioContext>>,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination>
    destination: MutNullableDom<AudioDestinationNode>,
    listener: MutNullableDom<AudioListener>,
    /// Resume promises which are soon to be fulfilled by a queued task.
    #[conditional_malloc_size_of]
    in_flight_resume_promises_queue: DomRefCell<VecDeque<(BoxedSliceOfPromises, ErrorResult)>>,
    /// <https://webaudio.github.io/web-audio-api/#pendingresumepromises>
    #[conditional_malloc_size_of]
    pending_resume_promises: DomRefCell<Vec<Rc<Promise>>>,
    decode_resolvers: DomRefCell<HashMap<String, DecodeResolver>>,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate>
    sample_rate: f32,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state>
    /// Although servo-media already keeps track of the control thread state,
    /// we keep a state flag here as well, so that we can synchronously
    /// throw when a method is called on a context that has just been
    /// closed.
    state: Cell<AudioContextState>,
    channel_count: u32,
}

impl BaseAudioContext {
    #[cfg_attr(crown, allow(crown::unrooted_must_root))]
    pub(crate) fn new_inherited(
        options: BaseAudioContextOptions,
        pipeline_id: PipelineId,
    ) -> Fallible<BaseAudioContext> {
        let (sample_rate, channel_count) = match options {
            BaseAudioContextOptions::AudioContext(ref opt) => (opt.sample_rate, 2),
            BaseAudioContextOptions::OfflineAudioContext(ref opt) => {
                (opt.sample_rate, opt.channels)
            },
        };

        let client_context_id =
            ClientContextId::build(pipeline_id.namespace_id.0, pipeline_id.index.0.get());
        let audio_context_impl = ServoMedia::get()
            .create_audio_context(&client_context_id, options.convert())
            .map_err(|_| Error::NotSupported)?;

        Ok(BaseAudioContext {
            eventtarget: EventTarget::new_inherited(),
            audio_context_impl,
            destination: Default::default(),
            listener: Default::default(),
            in_flight_resume_promises_queue: Default::default(),
            pending_resume_promises: Default::default(),
            decode_resolvers: Default::default(),
            sample_rate,
            state: Cell::new(AudioContextState::Suspended),
            channel_count: channel_count.into(),
        })
    }

    /// Tells whether this is an OfflineAudioContext or not.
    pub(crate) fn is_offline(&self) -> bool {
        false
    }

    pub(crate) fn audio_context_impl(&self) -> Arc<Mutex<AudioContext>> {
        self.audio_context_impl.clone()
    }

    pub(crate) fn destination_node(&self) -> NodeId {
        self.audio_context_impl.lock().unwrap().dest_node()
    }

    pub(crate) fn listener(&self) -> NodeId {
        self.audio_context_impl.lock().unwrap().listener()
    }

    // https://webaudio.github.io/web-audio-api/#allowed-to-start
    pub(crate) fn is_allowed_to_start(&self) -> bool {
        self.state.get() == AudioContextState::Suspended
    }

    fn push_pending_resume_promise(&self, promise: &Rc<Promise>) {
        self.pending_resume_promises
            .borrow_mut()
            .push(promise.clone());
    }

    /// Takes the pending resume promises.
    ///
    /// The result with which these promises will be fulfilled is passed here
    /// and this method returns nothing because we actually just move the
    /// current list of pending resume promises to the
    /// `in_flight_resume_promises_queue` field.
    ///
    /// Each call to this method must be followed by a call to
    /// `fulfill_in_flight_resume_promises`, to actually fulfill the promises
    /// which were taken and moved to the in-flight queue.
    fn take_pending_resume_promises(&self, result: ErrorResult) {
        let pending_resume_promises =
            std::mem::take(&mut *self.pending_resume_promises.borrow_mut());
        self.in_flight_resume_promises_queue
            .borrow_mut()
            .push_back((pending_resume_promises.into(), result));
    }

    /// Fulfills the next batch of in-flight resume promises after running a closure.
    ///
    /// See the comment on `take_pending_resume_promises` for why this method
    /// does not take a list of promises to fulfill. Callers cannot just pop
    /// the front list off of `in_flight_resume_promises_queue` and later
    /// fulfill the promises, because that would mean putting
    /// `#[cfg_attr(crown, allow(crown::unrooted_must_root))]` on even more
    /// functions, potentially hiding actual safety bugs.
    #[cfg_attr(crown, allow(crown::unrooted_must_root))]
    fn fulfill_in_flight_resume_promises<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        let (promises, result) = self
            .in_flight_resume_promises_queue
            .borrow_mut()
            .pop_front()
            .expect("there should be at least one list of in flight resume promises");
        f();
        for promise in &*promises {
            match result {
                Ok(ref value) => promise.resolve_native(value, CanGc::note()),
                Err(ref error) => promise.reject_error(error.clone(), CanGc::note()),
            }
        }
    }
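
    // `take_pending_resume_promises` and `fulfill_in_flight_resume_promises`
    // are always used as a pair; `resume` below is the canonical caller. A
    // condensed sketch of the protocol:
    //
    //     self.take_pending_resume_promises(Ok(()));
    //     // ...later, from a task queued on the DOM manipulation task source:
    //     this.fulfill_in_flight_resume_promises(|| {
    //         // state updates that must land before the promises settle
    //     });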

    /// Control thread processing state
    pub(crate) fn control_thread_state(&self) -> ProcessingState {
        self.audio_context_impl.lock().unwrap().state()
    }

    /// Set audio context state
    pub(crate) fn set_state_attribute(&self, state: AudioContextState) {
        self.state.set(state);
    }

    pub(crate) fn resume(&self) {
        let this = Trusted::new(self);
        // Set the rendering thread state to 'running' and start
        // rendering the audio graph.
        match self.audio_context_impl.lock().unwrap().resume() {
            Ok(()) => {
                self.take_pending_resume_promises(Ok(()));
                self.global().task_manager().dom_manipulation_task_source().queue(
                    task!(resume_success: move || {
                        let this = this.root();
                        this.fulfill_in_flight_resume_promises(|| {
                            if this.state.get() != AudioContextState::Running {
                                this.state.set(AudioContextState::Running);
                                this.global().task_manager().dom_manipulation_task_source().queue_simple_event(
                                    this.upcast(),
                                    atom!("statechange"),
                                );
                            }
                        });
                    })
                );
            },
            Err(()) => {
                self.take_pending_resume_promises(Err(Error::Type(
                    "Something went wrong".to_owned(),
                )));
                self.global()
                    .task_manager()
                    .dom_manipulation_task_source()
                    .queue(task!(resume_error: move || {
                        this.root().fulfill_in_flight_resume_promises(|| {})
                    }));
            },
        }
    }

    pub(crate) fn channel_count(&self) -> u32 {
        self.channel_count
    }
}

impl BaseAudioContextMethods<crate::DomTypeHolder> for BaseAudioContext {
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate>
    fn SampleRate(&self) -> Finite<f32> {
        Finite::wrap(self.sample_rate)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-currenttime>
    fn CurrentTime(&self) -> Finite<f64> {
        let current_time = self.audio_context_impl.lock().unwrap().current_time();
        Finite::wrap(current_time)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state>
    fn State(&self) -> AudioContextState {
        self.state.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-resume>
    fn Resume(&self, comp: InRealm, can_gc: CanGc) -> Rc<Promise> {
        // Step 1.
        let promise = Promise::new_in_current_realm(comp, can_gc);

        // Step 2.
        if self.audio_context_impl.lock().unwrap().state() == ProcessingState::Closed {
            promise.reject_error(Error::InvalidState(None), can_gc);
            return promise;
        }

        // Step 3.
        if self.state.get() == AudioContextState::Running {
            promise.resolve_native(&(), can_gc);
            return promise;
        }

        self.push_pending_resume_promise(&promise);

        // Step 4.
        if !self.is_allowed_to_start() {
            return promise;
        }

        // Steps 5 and 6.
        self.resume();

        // Step 7.
        promise
    }
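
    // Outcome summary for `Resume`: a closed context rejects with
    // `InvalidState`; an already-running context resolves immediately; a
    // suspended context pends the promise, and `resume` above settles it
    // from a queued task once the rendering thread has restarted.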

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination>
    fn Destination(&self, can_gc: CanGc) -> DomRoot<AudioDestinationNode> {
        let global = self.global();
        self.destination.or_init(|| {
            let mut options = AudioNodeOptions::empty();
            options.channelCount = Some(self.channel_count);
            options.channelCountMode = Some(ChannelCountMode::Explicit);
            options.channelInterpretation = Some(ChannelInterpretation::Speakers);
            AudioDestinationNode::new(&global, self, &options, can_gc)
        })
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-listener>
    fn Listener(&self, can_gc: CanGc) -> DomRoot<AudioListener> {
        let global = self.global();
        let window = global.as_window();
        self.listener
            .or_init(|| AudioListener::new(window, self, can_gc))
    }

    // https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-onstatechange
    event_handler!(statechange, GetOnstatechange, SetOnstatechange);

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createoscillator>
    fn CreateOscillator(&self, can_gc: CanGc) -> Fallible<DomRoot<OscillatorNode>> {
        OscillatorNode::new(
            self.global().as_window(),
            self,
            &OscillatorOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-creategain>
    fn CreateGain(&self, can_gc: CanGc) -> Fallible<DomRoot<GainNode>> {
        GainNode::new(
            self.global().as_window(),
            self,
            &GainOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createpanner>
    fn CreatePanner(&self, can_gc: CanGc) -> Fallible<DomRoot<PannerNode>> {
        PannerNode::new(
            self.global().as_window(),
            self,
            &PannerOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createanalyser>
    fn CreateAnalyser(&self, can_gc: CanGc) -> Fallible<DomRoot<AnalyserNode>> {
        AnalyserNode::new(
            self.global().as_window(),
            self,
            &AnalyserOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbiquadfilter>
    fn CreateBiquadFilter(&self, can_gc: CanGc) -> Fallible<DomRoot<BiquadFilterNode>> {
        BiquadFilterNode::new(
            self.global().as_window(),
            self,
            &BiquadFilterOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createstereopanner>
    fn CreateStereoPanner(&self, can_gc: CanGc) -> Fallible<DomRoot<StereoPannerNode>> {
        StereoPannerNode::new(
            self.global().as_window(),
            self,
            &StereoPannerOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createconstantsource>
    fn CreateConstantSource(&self, can_gc: CanGc) -> Fallible<DomRoot<ConstantSourceNode>> {
        ConstantSourceNode::new(
            self.global().as_window(),
            self,
            &ConstantSourceOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createchannelmerger>
    fn CreateChannelMerger(
        &self,
        count: u32,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<ChannelMergerNode>> {
        let mut opts = ChannelMergerOptions::empty();
        opts.numberOfInputs = count;
        ChannelMergerNode::new(self.global().as_window(), self, &opts, can_gc)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createchannelsplitter>
    fn CreateChannelSplitter(
        &self,
        count: u32,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<ChannelSplitterNode>> {
        let mut opts = ChannelSplitterOptions::empty();
        opts.numberOfOutputs = count;
        ChannelSplitterNode::new(self.global().as_window(), self, &opts, can_gc)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer>
    fn CreateBuffer(
        &self,
        number_of_channels: u32,
        length: u32,
        sample_rate: Finite<f32>,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<AudioBuffer>> {
        if number_of_channels == 0 ||
            number_of_channels > MAX_CHANNEL_COUNT ||
            length == 0 ||
            *sample_rate <= 0.
        {
            return Err(Error::NotSupported);
        }
        Ok(AudioBuffer::new(
            self.global().as_window(),
            number_of_channels,
            length,
            *sample_rate,
            None,
            can_gc,
        ))
    }
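
    // For example, `CreateBuffer(0, 480, 44100.)`, `CreateBuffer(2, 0, 44100.)`
    // and any non-positive sample rate all return `Err(Error::NotSupported)`,
    // as does a channel count above `MAX_CHANNEL_COUNT`.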

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffersource>
    fn CreateBufferSource(&self, can_gc: CanGc) -> Fallible<DomRoot<AudioBufferSourceNode>> {
        AudioBufferSourceNode::new(
            self.global().as_window(),
            self,
            &AudioBufferSourceOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-decodeaudiodata>
    fn DecodeAudioData(
        &self,
        audio_data: CustomAutoRooterGuard<ArrayBuffer>,
        decode_success_callback: Option<Rc<DecodeSuccessCallback>>,
        decode_error_callback: Option<Rc<DecodeErrorCallback>>,
        comp: InRealm,
        can_gc: CanGc,
    ) -> Rc<Promise> {
        // Step 1.
        let promise = Promise::new_in_current_realm(comp, can_gc);

        if audio_data.len() > 0 {
            // Step 2.
            // XXX detach array buffer.
            let uuid = Uuid::new_v4().simple().to_string();
            let uuid_ = uuid.clone();
            self.decode_resolvers.borrow_mut().insert(
                uuid.clone(),
                DecodeResolver {
                    promise: promise.clone(),
                    success_callback: decode_success_callback,
                    error_callback: decode_error_callback,
                },
            );
            let audio_data = audio_data.to_vec();
            let decoded_audio = Arc::new(Mutex::new(Vec::new()));
            let decoded_audio_ = decoded_audio.clone();
            let decoded_audio__ = decoded_audio.clone();
            // servo-media reports an audio channel position mask along
            // with each AudioDecoderCallbacks progress callback, which
            // may not match the index into the decoded_audio Vec.
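            // The `progress` callback below derives the Vec index by taking
            // log2 of the mask, so a mask with a single set bit maps to a
            // stable index: e.g. 0b0001 -> 0, 0b0010 -> 1, 0b0100 -> 2.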
            let channels = Arc::new(Mutex::new(HashMap::new()));
            let this = Trusted::new(self);
            let this_ = this.clone();
            let task_source = self
                .global()
                .task_manager()
                .dom_manipulation_task_source()
                .to_sendable();
            let task_source_clone = task_source.clone();
            let callbacks = AudioDecoderCallbacks::new()
                .ready(move |channel_count| {
                    decoded_audio
                        .lock()
                        .unwrap()
                        .resize(channel_count as usize, Vec::new());
                })
                .progress(move |buffer, channel_pos_mask| {
                    let mut decoded_audio = decoded_audio_.lock().unwrap();
                    let mut channels = channels.lock().unwrap();
                    let channel = match channels.entry(channel_pos_mask) {
                        Entry::Occupied(entry) => *entry.get(),
                        Entry::Vacant(entry) => {
                            let x = (channel_pos_mask as f32).log2() as usize;
                            *entry.insert(x)
                        },
                    };
                    decoded_audio[channel].extend_from_slice((*buffer).as_ref());
                })
                .eos(move || {
                    task_source.queue(task!(audio_decode_eos: move || {
                        let this = this.root();
                        let decoded_audio = decoded_audio__.lock().unwrap();
                        let length = if !decoded_audio.is_empty() {
                            decoded_audio[0].len()
                        } else {
                            0
                        };
                        let buffer = AudioBuffer::new(
                            this.global().as_window(),
                            decoded_audio.len() as u32 /* number of channels */,
                            length as u32,
                            this.sample_rate,
                            Some(decoded_audio.as_slice()),
                            CanGc::note());
                        let mut resolvers = this.decode_resolvers.borrow_mut();
                        assert!(resolvers.contains_key(&uuid_));
                        let resolver = resolvers.remove(&uuid_).unwrap();
                        if let Some(callback) = resolver.success_callback {
                            let _ = callback.Call__(&buffer, ExceptionHandling::Report, CanGc::note());
                        }
                        resolver.promise.resolve_native(&buffer, CanGc::note());
                    }));
                })
                .error(move |error| {
                    task_source_clone.queue(task!(audio_decode_error: move || {
                        let this = this_.root();
                        let mut resolvers = this.decode_resolvers.borrow_mut();
                        assert!(resolvers.contains_key(&uuid));
                        let resolver = resolvers.remove(&uuid).unwrap();
                        if let Some(callback) = resolver.error_callback {
                            let _ = callback.Call__(
                                &DOMException::new(&this.global(), DOMErrorName::DataCloneError, CanGc::note()),
                                ExceptionHandling::Report, CanGc::note());
                        }
                        let error = format!("Audio decode error {:?}", error);
                        resolver.promise.reject_error(Error::Type(error), CanGc::note());
                    }));
                })
                .build();
            self.audio_context_impl
                .lock()
                .unwrap()
                .decode_audio_data(audio_data, callbacks);
        } else {
            // Step 3.
            promise.reject_error(Error::DataClone(None), can_gc);
            return promise;
        }

        // Step 4.
        promise
    }
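
    // Decode lifecycle, as wired above: `ready(channel_count)` sizes the
    // per-channel sample buffers, each `progress(buffer, mask)` call appends
    // samples to the channel selected by the mask, and exactly one of
    // `eos`/`error` then queues a DOM task that removes the resolver, runs
    // the matching callback, and settles the promise.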

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createiirfilter>
    fn CreateIIRFilter(
        &self,
        feedforward: Vec<Finite<f64>>,
        feedback: Vec<Finite<f64>>,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<IIRFilterNode>> {
        let opts = IIRFilterOptions {
            parent: AudioNodeOptions::empty(),
            feedback,
            feedforward,
        };
        IIRFilterNode::new(self.global().as_window(), self, &opts, can_gc)
    }
}

impl Convert<AudioContextOptions> for BaseAudioContextOptions {
    fn convert(self) -> AudioContextOptions {
        match self {
            BaseAudioContextOptions::AudioContext(options) => {
                AudioContextOptions::RealTimeAudioContext(options)
            },
            BaseAudioContextOptions::OfflineAudioContext(options) => {
                AudioContextOptions::OfflineAudioContext(options)
            },
        }
    }
}

impl Convert<AudioContextState> for ProcessingState {
    fn convert(self) -> AudioContextState {
        match self {
            ProcessingState::Suspended => AudioContextState::Suspended,
            ProcessingState::Running => AudioContextState::Running,
            ProcessingState::Closed => AudioContextState::Closed,
        }
    }
}