script/dom/audio/baseaudiocontext.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::rc::Rc;
use std::sync::{Arc, Mutex};

use base::id::PipelineId;
use dom_struct::dom_struct;
use js::rust::CustomAutoRooterGuard;
use js::typedarray::ArrayBuffer;
use servo_media::audio::context::{
    AudioContext, AudioContextOptions, OfflineAudioContextOptions, ProcessingState,
    RealTimeAudioContextOptions,
};
use servo_media::audio::decoder::AudioDecoderCallbacks;
use servo_media::audio::graph::NodeId;
use servo_media::{ClientContextId, ServoMedia};
use uuid::Uuid;

use crate::conversions::Convert;
use crate::dom::audio::analysernode::AnalyserNode;
use crate::dom::audio::audiobuffer::AudioBuffer;
use crate::dom::audio::audiobuffersourcenode::AudioBufferSourceNode;
use crate::dom::audio::audiodestinationnode::AudioDestinationNode;
use crate::dom::audio::audiolistener::AudioListener;
use crate::dom::audio::audionode::MAX_CHANNEL_COUNT;
use crate::dom::audio::biquadfilternode::BiquadFilterNode;
use crate::dom::audio::channelmergernode::ChannelMergerNode;
use crate::dom::audio::channelsplitternode::ChannelSplitterNode;
use crate::dom::audio::constantsourcenode::ConstantSourceNode;
use crate::dom::audio::gainnode::GainNode;
use crate::dom::audio::iirfilternode::IIRFilterNode;
use crate::dom::audio::oscillatornode::OscillatorNode;
use crate::dom::audio::pannernode::PannerNode;
use crate::dom::audio::stereopannernode::StereoPannerNode;
use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::AnalyserNodeBinding::AnalyserOptions;
use crate::dom::bindings::codegen::Bindings::AudioBufferSourceNodeBinding::AudioBufferSourceOptions;
use crate::dom::bindings::codegen::Bindings::AudioNodeBinding::{
    AudioNodeOptions, ChannelCountMode, ChannelInterpretation,
};
use crate::dom::bindings::codegen::Bindings::BaseAudioContextBinding::{
    AudioContextState, BaseAudioContextMethods, DecodeErrorCallback, DecodeSuccessCallback,
};
use crate::dom::bindings::codegen::Bindings::BiquadFilterNodeBinding::BiquadFilterOptions;
use crate::dom::bindings::codegen::Bindings::ChannelMergerNodeBinding::ChannelMergerOptions;
use crate::dom::bindings::codegen::Bindings::ChannelSplitterNodeBinding::ChannelSplitterOptions;
use crate::dom::bindings::codegen::Bindings::ConstantSourceNodeBinding::ConstantSourceOptions;
use crate::dom::bindings::codegen::Bindings::GainNodeBinding::GainOptions;
use crate::dom::bindings::codegen::Bindings::IIRFilterNodeBinding::IIRFilterOptions;
use crate::dom::bindings::codegen::Bindings::OscillatorNodeBinding::OscillatorOptions;
use crate::dom::bindings::codegen::Bindings::PannerNodeBinding::PannerOptions;
use crate::dom::bindings::codegen::Bindings::StereoPannerNodeBinding::StereoPannerOptions;
use crate::dom::bindings::error::{Error, ErrorResult, Fallible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::refcounted::Trusted;
use crate::dom::bindings::reflector::DomGlobal;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::domexception::{DOMErrorName, DOMException};
use crate::dom::eventtarget::EventTarget;
use crate::dom::promise::Promise;
use crate::realms::InRealm;
use crate::script_runtime::CanGc;

pub(crate) enum BaseAudioContextOptions {
    AudioContext(RealTimeAudioContextOptions),
    OfflineAudioContext(OfflineAudioContextOptions),
}

#[derive(JSTraceable, MallocSizeOf)]
struct DecodeResolver {
    #[conditional_malloc_size_of]
    pub(crate) promise: Rc<Promise>,
    #[conditional_malloc_size_of]
    pub(crate) success_callback: Option<Rc<DecodeSuccessCallback>>,
    #[conditional_malloc_size_of]
    pub(crate) error_callback: Option<Rc<DecodeErrorCallback>>,
}

type BoxedSliceOfPromises = Box<[Rc<Promise>]>;

#[dom_struct]
pub(crate) struct BaseAudioContext {
    eventtarget: EventTarget,
    #[ignore_malloc_size_of = "servo_media"]
    #[no_trace]
    audio_context_impl: Arc<Mutex<AudioContext>>,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination>
    destination: MutNullableDom<AudioDestinationNode>,
    listener: MutNullableDom<AudioListener>,
    /// Resume promises which are soon to be fulfilled by a queued task.
    #[conditional_malloc_size_of]
    in_flight_resume_promises_queue: DomRefCell<VecDeque<(BoxedSliceOfPromises, ErrorResult)>>,
    /// <https://webaudio.github.io/web-audio-api/#pendingresumepromises>
    #[conditional_malloc_size_of]
    pending_resume_promises: DomRefCell<Vec<Rc<Promise>>>,
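    /// Resolvers for in-flight `decodeAudioData` calls, keyed by a
    /// per-call UUID (see `DecodeAudioData` below).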
    decode_resolvers: DomRefCell<HashMap<String, DecodeResolver>>,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate>
    sample_rate: f32,
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state>
    /// Although servo-media already keeps track of the control thread state,
    /// we keep a state flag here as well. This is so that we can synchronously
    /// throw when trying to do things on the context when the context has just
    /// been "closed()".
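    /// For example, right after `close()` is called, attempts to use the
    /// context can be rejected from this flag without waiting for the
    /// control thread to acknowledge the close.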
    state: Cell<AudioContextState>,
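    /// Number of channels of this context's destination node: 2 for a
    /// realtime `AudioContext`, or the channel count requested for an
    /// `OfflineAudioContext`.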
    channel_count: u32,
}

impl BaseAudioContext {
    pub(crate) fn new_inherited(
        options: BaseAudioContextOptions,
        pipeline_id: PipelineId,
    ) -> Fallible<BaseAudioContext> {
        let (sample_rate, channel_count) = match options {
            BaseAudioContextOptions::AudioContext(ref opt) => (opt.sample_rate, 2),
            BaseAudioContextOptions::OfflineAudioContext(ref opt) => {
                (opt.sample_rate, opt.channels)
            },
        };

        let client_context_id =
            ClientContextId::build(pipeline_id.namespace_id.0, pipeline_id.index.0.get());
        let audio_context_impl = ServoMedia::get()
            .create_audio_context(&client_context_id, options.convert())
            .map_err(|_| Error::NotSupported(None))?;

        Ok(BaseAudioContext {
            eventtarget: EventTarget::new_inherited(),
            audio_context_impl,
            destination: Default::default(),
            listener: Default::default(),
            in_flight_resume_promises_queue: Default::default(),
            pending_resume_promises: Default::default(),
            decode_resolvers: Default::default(),
            sample_rate,
            state: Cell::new(AudioContextState::Suspended),
            channel_count: channel_count.into(),
        })
    }

    /// Tells whether this is an OfflineAudioContext or not.
    pub(crate) fn is_offline(&self) -> bool {
        false
    }

    pub(crate) fn audio_context_impl(&self) -> Arc<Mutex<AudioContext>> {
        self.audio_context_impl.clone()
    }

    pub(crate) fn destination_node(&self) -> NodeId {
        self.audio_context_impl.lock().unwrap().dest_node()
    }

    pub(crate) fn listener(&self) -> NodeId {
        self.audio_context_impl.lock().unwrap().listener()
    }

    // https://webaudio.github.io/web-audio-api/#allowed-to-start
    pub(crate) fn is_allowed_to_start(&self) -> bool {
        self.state.get() == AudioContextState::Suspended
    }

    fn push_pending_resume_promise(&self, promise: &Rc<Promise>) {
        self.pending_resume_promises
            .borrow_mut()
            .push(promise.clone());
    }

    /// Takes the pending resume promises.
    ///
    /// The result with which these promises will be fulfilled is passed here
    /// and this method returns nothing because we actually just move the
    /// current list of pending resume promises to the
    /// `in_flight_resume_promises_queue` field.
    ///
    /// Each call to this method must be followed by a call to
    /// `fulfill_in_flight_resume_promises`, to actually fulfill the promises
    /// which were taken and moved to the in-flight queue.
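    ///
    /// A minimal sketch of the expected pairing (cf. `resume` below):
    ///
    /// ```ignore
    /// self.take_pending_resume_promises(Ok(()));
    /// // ...from a task queued right after...
    /// this.fulfill_in_flight_resume_promises(|| {
    ///     // state changes that must land before the promises settle
    /// });
    /// ```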
    fn take_pending_resume_promises(&self, result: ErrorResult) {
        let pending_resume_promises =
            std::mem::take(&mut *self.pending_resume_promises.borrow_mut());
        self.in_flight_resume_promises_queue
            .borrow_mut()
            .push_back((pending_resume_promises.into(), result));
    }

    /// Fulfills the next batch of in-flight resume promises after running a closure.
    ///
    /// See the comment on `take_pending_resume_promises` for why this method
    /// does not take a list of promises to fulfill. Callers cannot just pop
    /// the front list off of `in_flight_resume_promises_queue` and later fulfill
    /// the promises because that would mean putting
    /// `#[cfg_attr(crown, expect(crown::unrooted_must_root))]` on even more functions, potentially
    /// hiding actual safety bugs.
    fn fulfill_in_flight_resume_promises<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        let (promises, result) = self
            .in_flight_resume_promises_queue
            .borrow_mut()
            .pop_front()
            .expect("there should be at least one list of in flight resume promises");
        f();
        for promise in &*promises {
            match result {
                Ok(ref value) => promise.resolve_native(value, CanGc::note()),
                Err(ref error) => promise.reject_error(error.clone(), CanGc::note()),
            }
        }
    }

    /// Control thread processing state.
    pub(crate) fn control_thread_state(&self) -> ProcessingState {
        self.audio_context_impl.lock().unwrap().state()
    }

    /// Set the audio context state flag.
    pub(crate) fn set_state_attribute(&self, state: AudioContextState) {
        self.state.set(state);
    }

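    /// Resumes rendering via servo-media and, from a queued task, settles the
    /// promises previously taken by `take_pending_resume_promises`.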
    pub(crate) fn resume(&self) {
        let this = Trusted::new(self);
        // Set the rendering thread state to 'running' and start
        // rendering the audio graph.
        match self.audio_context_impl.lock().unwrap().resume() {
            Ok(()) => {
                self.take_pending_resume_promises(Ok(()));
                self.global().task_manager().dom_manipulation_task_source().queue(
                    task!(resume_success: move || {
                        let this = this.root();
                        this.fulfill_in_flight_resume_promises(|| {
                            if this.state.get() != AudioContextState::Running {
                                this.state.set(AudioContextState::Running);
                                this.global().task_manager().dom_manipulation_task_source().queue_simple_event(
                                    this.upcast(),
                                    atom!("statechange"),
                                );
                            }
                        });
                    })
                );
            },
            Err(()) => {
                self.take_pending_resume_promises(Err(Error::Type(
                    "Failed to resume the audio context".to_owned(),
                )));
                self.global()
                    .task_manager()
                    .dom_manipulation_task_source()
                    .queue(task!(resume_error: move || {
                        this.root().fulfill_in_flight_resume_promises(|| {})
                    }));
            },
        }
    }

    pub(crate) fn channel_count(&self) -> u32 {
        self.channel_count
    }
}

impl BaseAudioContextMethods<crate::DomTypeHolder> for BaseAudioContext {
    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-samplerate>
    fn SampleRate(&self) -> Finite<f32> {
        Finite::wrap(self.sample_rate)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-currenttime>
    fn CurrentTime(&self) -> Finite<f64> {
        let current_time = self.audio_context_impl.lock().unwrap().current_time();
        Finite::wrap(current_time)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-state>
    fn State(&self) -> AudioContextState {
        self.state.get()
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-resume>
    fn Resume(&self, comp: InRealm, can_gc: CanGc) -> Rc<Promise> {
        // Step 1.
        let promise = Promise::new_in_current_realm(comp, can_gc);

        // Step 2.
        if self.audio_context_impl.lock().unwrap().state() == ProcessingState::Closed {
            promise.reject_error(Error::InvalidState(None), can_gc);
            return promise;
        }

        // Step 3.
        if self.state.get() == AudioContextState::Running {
            promise.resolve_native(&(), can_gc);
            return promise;
        }

        self.push_pending_resume_promise(&promise);

        // Step 4.
        if !self.is_allowed_to_start() {
            return promise;
        }

        // Steps 5 and 6.
        self.resume();

        // Step 7.
        promise
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-destination>
    fn Destination(&self, can_gc: CanGc) -> DomRoot<AudioDestinationNode> {
        let global = self.global();
        self.destination.or_init(|| {
            let mut options = AudioNodeOptions::empty();
            options.channelCount = Some(self.channel_count);
            options.channelCountMode = Some(ChannelCountMode::Explicit);
            options.channelInterpretation = Some(ChannelInterpretation::Speakers);
            AudioDestinationNode::new(&global, self, &options, can_gc)
        })
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-listener>
    fn Listener(&self, can_gc: CanGc) -> DomRoot<AudioListener> {
        let global = self.global();
        let window = global.as_window();
        self.listener
            .or_init(|| AudioListener::new(window, self, can_gc))
    }

    // https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-onstatechange
    event_handler!(statechange, GetOnstatechange, SetOnstatechange);

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createoscillator>
    fn CreateOscillator(&self, can_gc: CanGc) -> Fallible<DomRoot<OscillatorNode>> {
        OscillatorNode::new(
            self.global().as_window(),
            self,
            &OscillatorOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-creategain>
    fn CreateGain(&self, can_gc: CanGc) -> Fallible<DomRoot<GainNode>> {
        GainNode::new(
            self.global().as_window(),
            self,
            &GainOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createpanner>
    fn CreatePanner(&self, can_gc: CanGc) -> Fallible<DomRoot<PannerNode>> {
        PannerNode::new(
            self.global().as_window(),
            self,
            &PannerOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createanalyser>
    fn CreateAnalyser(&self, can_gc: CanGc) -> Fallible<DomRoot<AnalyserNode>> {
        AnalyserNode::new(
            self.global().as_window(),
            self,
            &AnalyserOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbiquadfilter>
    fn CreateBiquadFilter(&self, can_gc: CanGc) -> Fallible<DomRoot<BiquadFilterNode>> {
        BiquadFilterNode::new(
            self.global().as_window(),
            self,
            &BiquadFilterOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createstereopanner>
    fn CreateStereoPanner(&self, can_gc: CanGc) -> Fallible<DomRoot<StereoPannerNode>> {
        StereoPannerNode::new(
            self.global().as_window(),
            self,
            &StereoPannerOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createconstantsource>
    fn CreateConstantSource(&self, can_gc: CanGc) -> Fallible<DomRoot<ConstantSourceNode>> {
        ConstantSourceNode::new(
            self.global().as_window(),
            self,
            &ConstantSourceOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createchannelmerger>
    fn CreateChannelMerger(
        &self,
        count: u32,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<ChannelMergerNode>> {
        let mut opts = ChannelMergerOptions::empty();
        opts.numberOfInputs = count;
        ChannelMergerNode::new(self.global().as_window(), self, &opts, can_gc)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createchannelsplitter>
    fn CreateChannelSplitter(
        &self,
        count: u32,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<ChannelSplitterNode>> {
        let mut opts = ChannelSplitterOptions::empty();
        opts.numberOfOutputs = count;
        ChannelSplitterNode::new(self.global().as_window(), self, &opts, can_gc)
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffer>
    fn CreateBuffer(
        &self,
        number_of_channels: u32,
        length: u32,
        sample_rate: Finite<f32>,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<AudioBuffer>> {
        if number_of_channels == 0 ||
            number_of_channels > MAX_CHANNEL_COUNT ||
            length == 0 ||
            *sample_rate <= 0.
        {
            return Err(Error::NotSupported(None));
        }
        Ok(AudioBuffer::new(
            self.global().as_window(),
            number_of_channels,
            length,
            *sample_rate,
            None,
            can_gc,
        ))
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createbuffersource>
    fn CreateBufferSource(&self, can_gc: CanGc) -> Fallible<DomRoot<AudioBufferSourceNode>> {
        AudioBufferSourceNode::new(
            self.global().as_window(),
            self,
            &AudioBufferSourceOptions::empty(),
            can_gc,
        )
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-decodeaudiodata>
    fn DecodeAudioData(
        &self,
        audio_data: CustomAutoRooterGuard<ArrayBuffer>,
        decode_success_callback: Option<Rc<DecodeSuccessCallback>>,
        decode_error_callback: Option<Rc<DecodeErrorCallback>>,
        comp: InRealm,
        can_gc: CanGc,
    ) -> Rc<Promise> {
        // Step 1.
        let promise = Promise::new_in_current_realm(comp, can_gc);

        if audio_data.len() > 0 {
            // Step 2.
            // XXX detach array buffer.
            let uuid = Uuid::new_v4().simple().to_string();
            let uuid_ = uuid.clone();
            self.decode_resolvers.borrow_mut().insert(
                uuid.clone(),
                DecodeResolver {
                    promise: promise.clone(),
                    success_callback: decode_success_callback,
                    error_callback: decode_error_callback,
                },
            );
            let audio_data = audio_data.to_vec();
            let decoded_audio = Arc::new(Mutex::new(Vec::new()));
            let decoded_audio_ = decoded_audio.clone();
            let decoded_audio__ = decoded_audio.clone();
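            // Each of the `ready`, `progress`, and `eos` callbacks below
            // moves its own handle to the shared decoded audio buffer.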
            // servo-media passes an audio channel position bitmask to the
            // AudioDecoderCallbacks progress callback; that position may not
            // match the channel's index in the decoded_audio Vec.
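            // For example, a (purely illustrative) position mask of 0b100
            // would map to index 2 below, since log2(4) == 2.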
            let channels = Arc::new(Mutex::new(HashMap::new()));
            let this = Trusted::new(self);
            let this_ = this.clone();
            let task_source = self
                .global()
                .task_manager()
                .dom_manipulation_task_source()
                .to_sendable();
            let task_source_clone = task_source.clone();
            let callbacks = AudioDecoderCallbacks::new()
                .ready(move |channel_count| {
                    decoded_audio
                        .lock()
                        .unwrap()
                        .resize(channel_count as usize, Vec::new());
                })
                .progress(move |buffer, channel_pos_mask| {
                    let mut decoded_audio = decoded_audio_.lock().unwrap();
                    let mut channels = channels.lock().unwrap();
                    let channel = match channels.entry(channel_pos_mask) {
                        Entry::Occupied(entry) => *entry.get(),
                        Entry::Vacant(entry) => {
                            let x = (channel_pos_mask as f32).log2() as usize;
                            *entry.insert(x)
                        },
                    };
                    decoded_audio[channel].extend_from_slice((*buffer).as_ref());
                })
                .eos(move || {
                    task_source.queue(task!(audio_decode_eos: move || {
                        let this = this.root();
                        let decoded_audio = decoded_audio__.lock().unwrap();
                        let length = if !decoded_audio.is_empty() {
                            decoded_audio[0].len()
                        } else {
                            0
                        };
                        let buffer = AudioBuffer::new(
                            this.global().as_window(),
                            decoded_audio.len() as u32 /* number of channels */,
                            length as u32,
                            this.sample_rate,
                            Some(decoded_audio.as_slice()),
                            CanGc::note());
                        let mut resolvers = this.decode_resolvers.borrow_mut();
                        assert!(resolvers.contains_key(&uuid_));
                        let resolver = resolvers.remove(&uuid_).unwrap();
                        if let Some(callback) = resolver.success_callback {
                            let _ = callback.Call__(&buffer, ExceptionHandling::Report, CanGc::note());
                        }
                        resolver.promise.resolve_native(&buffer, CanGc::note());
                    }));
                })
                .error(move |error| {
                    task_source_clone.queue(task!(audio_decode_error: move || {
                        let this = this_.root();
                        let mut resolvers = this.decode_resolvers.borrow_mut();
                        assert!(resolvers.contains_key(&uuid));
                        let resolver = resolvers.remove(&uuid).unwrap();
                        if let Some(callback) = resolver.error_callback {
                            let _ = callback.Call__(
                                &DOMException::new(&this.global(), DOMErrorName::DataCloneError, CanGc::note()),
                                ExceptionHandling::Report, CanGc::note());
                        }
                        let error = format!("Audio decode error {:?}", error);
                        resolver.promise.reject_error(Error::Type(error), CanGc::note());
                    }));
                })
                .build();
            self.audio_context_impl
                .lock()
                .unwrap()
                .decode_audio_data(audio_data, callbacks);
        } else {
            // Step 3.
            promise.reject_error(Error::DataClone(None), can_gc);
            return promise;
        }

        // Step 4.
        promise
    }

    /// <https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createiirfilter>
    fn CreateIIRFilter(
        &self,
        feedforward: Vec<Finite<f64>>,
        feedback: Vec<Finite<f64>>,
        can_gc: CanGc,
    ) -> Fallible<DomRoot<IIRFilterNode>> {
        let opts = IIRFilterOptions {
            parent: AudioNodeOptions::empty(),
            feedback,
            feedforward,
        };
        IIRFilterNode::new(self.global().as_window(), self, &opts, can_gc)
    }
}

impl Convert<AudioContextOptions> for BaseAudioContextOptions {
    fn convert(self) -> AudioContextOptions {
        match self {
            BaseAudioContextOptions::AudioContext(options) => {
                AudioContextOptions::RealTimeAudioContext(options)
            },
            BaseAudioContextOptions::OfflineAudioContext(options) => {
                AudioContextOptions::OfflineAudioContext(options)
            },
        }
    }
}

impl Convert<AudioContextState> for ProcessingState {
    fn convert(self) -> AudioContextState {
        match self {
            ProcessingState::Suspended => AudioContextState::Suspended,
            ProcessingState::Running => AudioContextState::Running,
            ProcessingState::Closed => AudioContextState::Closed,
        }
    }
}