webgpu/canvas_context.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

//! Main process implementation of [GPUCanvasContext](https://www.w3.org/TR/webgpu/#canvas-context)

use std::ptr::NonNull;
use std::sync::{Arc, Mutex};

use arrayvec::ArrayVec;
use base::Epoch;
use compositing_traits::{
    CrossProcessCompositorApi, ExternalImageSource, SerializableImageData,
    WebRenderExternalImageApi,
};
use euclid::default::Size2D;
use ipc_channel::ipc::IpcSender;
use log::warn;
use pixels::{SharedSnapshot, Snapshot, SnapshotAlphaMode, SnapshotPixelFormat};
use rustc_hash::FxHashMap;
use webgpu_traits::{
    ContextConfiguration, PRESENTATION_BUFFER_COUNT, PendingTexture, WebGPUContextId, WebGPUMsg,
};
use webrender_api::units::DeviceIntSize;
use webrender_api::{
    ExternalImageData, ExternalImageId, ExternalImageType, ImageDescriptor, ImageDescriptorFlags,
    ImageFormat, ImageKey,
};
use wgpu_core::device::HostMap;
use wgpu_core::global::Global;
use wgpu_core::id::{
    self, BufferId, CommandBufferId, CommandEncoderId, DeviceId, QueueId, TextureId,
};
use wgpu_core::resource::{
    BufferAccessError, BufferDescriptor, BufferMapOperation, CreateBufferError,
};
use wgpu_types::{
    BufferUsages, COPY_BYTES_PER_ROW_ALIGNMENT, CommandBufferDescriptor, CommandEncoderDescriptor,
    Extent3d, Origin3d, TexelCopyBufferInfo, TexelCopyBufferLayout, TexelCopyTextureInfo,
    TextureAspect,
};

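/// Map of all live WebGPU canvas contexts, shared between the WGPU instance and the
/// WebRender external image handler ([`WGPUExternalImages`]).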
pub type WGPUImageMap = Arc<Mutex<FxHashMap<WebGPUContextId, ContextData>>>;

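/// Build the WebRender [`ExternalImageData`] descriptor for a context, using the context id
/// as the external image id; the pixels themselves are supplied at composition time by
/// `WGPUExternalImages::lock`.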
const fn image_data(context_id: WebGPUContextId) -> ExternalImageData {
    ExternalImageData {
        id: ExternalImageId(context_id.0),
        channel_index: 0,
        image_type: ExternalImageType::Buffer,
        normalized_uvs: false,
    }
}

/// Allocated buffer on GPU device
#[derive(Clone, Copy, Debug)]
struct Buffer {
    device_id: DeviceId,
    queue_id: QueueId,
    size: u64,
}

impl Buffer {
    /// Returns true if buffer is compatible with provided configuration
    fn has_compatible_config(&self, config: &ContextConfiguration) -> bool {
        config.device_id == self.device_id && self.size == config.buffer_size()
    }
}

/// Mapped GPUBuffer
#[derive(Debug)]
struct MappedBuffer {
    buffer: Buffer,
    data: NonNull<u8>,
    len: u64,
    image_size: Size2D<u32>,
    image_format: ImageFormat,
    is_opaque: bool,
}

// A mapped buffer can safely be shared between threads (it's read-only).
unsafe impl Send for MappedBuffer {}

impl MappedBuffer {
    const fn slice(&'_ self) -> &'_ [u8] {
        // Safety: Pointer is from wgpu, and we only use it here
        unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len as usize) }
    }

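    /// The row pitch of the mapped image data. Rows are padded up to
    /// [`COPY_BYTES_PER_ROW_ALIGNMENT`], since wgpu requires texture-to-buffer copies to use
    /// a `bytes_per_row` that is a multiple of that alignment.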
    fn stride(&self) -> u32 {
        (self.image_size.width * self.image_format.bytes_per_pixel() as u32)
            .next_multiple_of(COPY_BYTES_PER_ROW_ALIGNMENT)
    }
}

#[derive(Debug)]
enum StagingBufferState {
    /// The initial state: the buffer has not been created yet; only an id is
    /// reserved for it.
    Unassigned,
    /// The buffer is allocated in the WGPU Device and is ready to be used.
    Available(Buffer),
    /// `mapAsync` is currently running on the buffer.
    Mapping(Buffer),
    /// The buffer is currently mapped.
    Mapped(MappedBuffer),
}

/// A staging buffer used for texture to buffer to CPU copy operations.
#[derive(Debug)]
struct StagingBuffer {
    global: Arc<Global>,
    buffer_id: BufferId,
    state: StagingBufferState,
}

// A [`StagingBuffer`] is only used for reading (never for writing),
// so it is safe to share between threads.
unsafe impl Sync for StagingBuffer {}

impl StagingBuffer {
    fn new(global: Arc<Global>, buffer_id: BufferId) -> Self {
        Self {
            global,
            buffer_id,
            state: StagingBufferState::Unassigned,
        }
    }

    const fn is_mapped(&self) -> bool {
        matches!(self.state, StagingBufferState::Mapped(..))
    }

    /// Returns true if the buffer can be used directly with the provided config,
    /// without any additional work.
    fn is_available_and_has_compatible_config(&self, config: &ContextConfiguration) -> bool {
        let StagingBufferState::Available(buffer) = &self.state else {
            return false;
        };
        buffer.has_compatible_config(config)
    }

    /// Returns true if the buffer is neither being mapped nor already mapped.
    const fn needs_assignment(&self) -> bool {
        matches!(
            self.state,
            StagingBufferState::Unassigned | StagingBufferState::Available(_)
        )
    }

    /// Make the buffer available by unmapping it, or by destroying and recreating it, as needed.
    fn ensure_available(&mut self, config: &ContextConfiguration) -> Result<(), CreateBufferError> {
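        // Reuse the existing buffer (just unmap it) when it already matches the requested
        // device and size; otherwise drop it and create a new one below.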
        let recreate = match &self.state {
            StagingBufferState::Unassigned => true,
            StagingBufferState::Available(buffer) |
            StagingBufferState::Mapping(buffer) |
            StagingBufferState::Mapped(MappedBuffer { buffer, .. }) => {
                if buffer.has_compatible_config(config) {
                    let _ = self.global.buffer_unmap(self.buffer_id);
                    false
                } else {
                    self.global.buffer_drop(self.buffer_id);
                    true
                }
            },
        };
        if recreate {
            let buffer_size = config.buffer_size();
            let (_, error) = self.global.device_create_buffer(
                config.device_id,
                &BufferDescriptor {
                    label: None,
                    size: buffer_size,
                    usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST,
                    mapped_at_creation: false,
                },
                Some(self.buffer_id),
            );
            if let Some(error) = error {
                return Err(error);
            };
            self.state = StagingBufferState::Available(Buffer {
                device_id: config.device_id,
                queue_id: config.queue_id,
                size: buffer_size,
            });
        }
        Ok(())
    }

    /// Makes buffer available and prepares command encoder
    /// that will copy texture to this staging buffer.
    ///
    /// Caller must submit command buffer to queue.
    fn prepare_load_texture_command_buffer(
        &mut self,
        texture_id: TextureId,
        encoder_id: CommandEncoderId,
        config: &ContextConfiguration,
    ) -> Result<CommandBufferId, Box<dyn std::error::Error>> {
        self.ensure_available(config)?;
        let StagingBufferState::Available(buffer) = &self.state else {
            unreachable!("Should be made available by `ensure_available`")
        };
        let device_id = buffer.device_id;
        let command_descriptor = CommandEncoderDescriptor { label: None };
        let (encoder_id, error) = self.global.device_create_command_encoder(
            device_id,
            &command_descriptor,
            Some(encoder_id),
        );
        if let Some(error) = error {
            return Err(error.into());
        };
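        // The copy destination layout: `config.stride()` is assumed to give the row pitch
        // padded to `COPY_BYTES_PER_ROW_ALIGNMENT`, matching `MappedBuffer::stride` on readback.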
        let buffer_info = TexelCopyBufferInfo {
            buffer: self.buffer_id,
            layout: TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(config.stride()),
                rows_per_image: None,
            },
        };
        let texture_info = TexelCopyTextureInfo {
            texture: texture_id,
            mip_level: 0,
            origin: Origin3d::ZERO,
            aspect: TextureAspect::All,
        };
        let copy_size = Extent3d {
            width: config.size.width,
            height: config.size.height,
            depth_or_array_layers: 1,
        };
        self.global.command_encoder_copy_texture_to_buffer(
            encoder_id,
            &texture_info,
            &buffer_info,
            &copy_size,
        )?;
        let (command_buffer_id, error) = self
            .global
            .command_encoder_finish(encoder_id, &CommandBufferDescriptor::default());
        if let Some(error) = error {
            return Err(error.into());
        };
        Ok(command_buffer_id)
    }

    /// Unmaps the buffer or cancels a mapping operation if one is in progress.
    fn unmap(&mut self) {
        match self.state {
            StagingBufferState::Unassigned | StagingBufferState::Available(_) => {},
            StagingBufferState::Mapping(buffer) |
            StagingBufferState::Mapped(MappedBuffer { buffer, .. }) => {
                let _ = self.global.buffer_unmap(self.buffer_id);
                self.state = StagingBufferState::Available(buffer)
            },
        }
    }

    /// Obtain a snapshot from this buffer if it is mapped, or return `None` if it is not.
    fn snapshot(&self) -> Option<Snapshot> {
        let StagingBufferState::Mapped(mapped) = &self.state else {
            return None;
        };
        let format = match mapped.image_format {
            ImageFormat::RGBA8 => SnapshotPixelFormat::RGBA,
            ImageFormat::BGRA8 => SnapshotPixelFormat::BGRA,
            _ => unreachable!("GPUCanvasContext does not support other formats per spec"),
        };
        let alpha_mode = if mapped.is_opaque {
            SnapshotAlphaMode::AsOpaque {
                premultiplied: false,
            }
        } else {
            SnapshotAlphaMode::Transparent {
                premultiplied: true,
            }
        };
        let padded_byte_width = mapped.stride();
        let data = mapped.slice();
        let bytes_per_pixel = mapped.image_format.bytes_per_pixel() as usize;
        let mut result_unpadded =
            Vec::<u8>::with_capacity(mapped.image_size.area() as usize * bytes_per_pixel);
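        // Rows in the mapped buffer are padded to `padded_byte_width`; copy only the
        // `width * bytes_per_pixel` meaningful bytes of each row into the snapshot.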
        for row in 0..mapped.image_size.height {
            let start = (row * padded_byte_width).try_into().ok()?;
            result_unpadded
                .extend(&data[start..start + mapped.image_size.width as usize * bytes_per_pixel]);
        }
        let mut snapshot =
            Snapshot::from_vec(mapped.image_size, format, alpha_mode, result_unpadded);
        if mapped.is_opaque {
            snapshot.transform(SnapshotAlphaMode::Opaque, snapshot.format())
        }
        Some(snapshot)
    }
}

impl Drop for StagingBuffer {
    fn drop(&mut self) {
        match self.state {
            StagingBufferState::Unassigned => {},
            StagingBufferState::Available(_) |
            StagingBufferState::Mapping(_) |
            StagingBufferState::Mapped(_) => {
                self.global.buffer_drop(self.buffer_id);
            },
        }
    }
}

#[derive(Default)]
pub struct WGPUExternalImages {
    pub images: WGPUImageMap,
    pub locked_ids: FxHashMap<WebGPUContextId, PresentationStagingBuffer>,
}

impl WebRenderExternalImageApi for WGPUExternalImages {
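    // WebRender calls `lock` when it needs the pixels backing the external image: hand it the
    // currently presented, mapped staging buffer. The clone stored in `locked_ids` keeps that
    // buffer alive (and mapped) until the matching `unlock`.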
    fn lock(&mut self, id: u64) -> (ExternalImageSource<'_>, Size2D<i32>) {
        let id = WebGPUContextId(id);
        let presentation = {
            let mut webgpu_contexts = self.images.lock().unwrap();
            webgpu_contexts
                .get_mut(&id)
                .and_then(|context_data| context_data.presentation.clone())
        };
        let Some(presentation) = presentation else {
            return (ExternalImageSource::Invalid, Size2D::zero());
        };
        self.locked_ids.insert(id, presentation);
        let presentation = self.locked_ids.get(&id).unwrap();
        let StagingBufferState::Mapped(mapped_buffer) = &presentation.staging_buffer.state else {
            unreachable!("Presentation staging buffer should be mapped")
        };
        let size = mapped_buffer.image_size;
        (
            ExternalImageSource::RawData(mapped_buffer.slice()),
            size.cast().cast_unit(),
        )
    }

    fn unlock(&mut self, id: u64) {
        let id = WebGPUContextId(id);
        let Some(presentation) = self.locked_ids.remove(&id) else {
            return;
        };
        let mut webgpu_contexts = self.images.lock().unwrap();
        if let Some(context_data) = webgpu_contexts.get_mut(&id) {
            // This returns the staging buffer to the context's pool if a newer
            // presentation has already replaced it.
            presentation.maybe_destroy(context_data);
        } else {
            // This will not free this buffer id in script,
            // but that's okay because we still have many free ids.
            drop(presentation);
        }
    }
}

/// The staging buffer currently used for presentation, tagged with the [`Epoch`] it presents.
///
/// Users should call [`ContextData::replace_presentation`] when done.
#[derive(Clone)]
pub struct PresentationStagingBuffer {
    epoch: Epoch,
    staging_buffer: Arc<StagingBuffer>,
}

impl PresentationStagingBuffer {
    fn new(epoch: Epoch, staging_buffer: StagingBuffer) -> Self {
        Self {
            epoch,
            staging_buffer: Arc::new(staging_buffer),
        }
    }

    /// If the internal staging buffer is not shared,
    /// unmap it and call [`ContextData::return_staging_buffer`] with it.
    fn maybe_destroy(self, context_data: &mut ContextData) {
        if let Some(mut staging_buffer) = Arc::into_inner(self.staging_buffer) {
            staging_buffer.unmap();
            context_data.return_staging_buffer(staging_buffer);
        }
    }
}

/// The embedder-process-side representation of the `GPUCanvasContext` in script.
pub struct ContextData {
    /// The [`ImageKey`] of the WebRender image associated with this context.
    image_key: Option<ImageKey>,
    /// The current size of this context.
    size: DeviceIntSize,
    /// Staging buffers that are not actively used.
    ///
    /// Staging buffers here are either [`StagingBufferState::Unassigned`] or [`StagingBufferState::Available`].
    /// They are removed from here while they are being mapped or are already mapped.
    inactive_staging_buffers: ArrayVec<StagingBuffer, PRESENTATION_BUFFER_COUNT>,
    /// The [`PresentationStagingBuffer`] of the most recent presentation. This will
    /// be `None` directly after initialization, as clearing is handled completely in
    /// the `ScriptThread`.
    presentation: Option<PresentationStagingBuffer>,
    /// Next epoch to be used
    next_epoch: Epoch,
}

impl ContextData {
    fn new(
        global: &Arc<Global>,
        buffer_ids: ArrayVec<id::BufferId, PRESENTATION_BUFFER_COUNT>,
        size: DeviceIntSize,
    ) -> Self {
        Self {
            image_key: None,
            size,
            inactive_staging_buffers: buffer_ids
                .iter()
                .map(|buffer_id| StagingBuffer::new(global.clone(), *buffer_id))
                .collect(),
            presentation: None,
            next_epoch: Epoch(1),
        }
    }

    /// Returns `None` if no staging buffer is free or if making one available fails.
    fn get_or_make_available_buffer(
        &'_ mut self,
        config: &ContextConfiguration,
    ) -> Option<StagingBuffer> {
        self.inactive_staging_buffers
            .iter()
            // Prefer a buffer that is already available with a compatible configuration.
            .position(|staging_buffer| {
                staging_buffer.is_available_and_has_compatible_config(config)
            })
            // Fall back to the first inactive staging buffer.
            .or_else(|| {
                self.inactive_staging_buffers
                    .iter()
                    .position(|staging_buffer| staging_buffer.needs_assignment())
            })
            // Or just use the first one.
            .or_else(|| {
                if self.inactive_staging_buffers.is_empty() {
                    None
                } else {
                    Some(0)
                }
            })
            .and_then(|index| {
                let mut staging_buffer = self.inactive_staging_buffers.remove(index);
                if staging_buffer.ensure_available(config).is_ok() {
                    Some(staging_buffer)
                } else {
                    // If we fail to make it available, return it to the list of inactive staging buffers.
                    self.inactive_staging_buffers.push(staging_buffer);
                    None
                }
            })
    }

    /// Destroy the context that this [`ContextData`] represents,
    /// freeing all of its buffers, and deleting the associated WebRender image.
    fn destroy(
        mut self,
        script_sender: &IpcSender<WebGPUMsg>,
        compositor_api: &CrossProcessCompositorApi,
    ) {
        // This frees the id in the `ScriptThread`.
        for staging_buffer in self.inactive_staging_buffers {
            if let Err(error) = script_sender.send(WebGPUMsg::FreeBuffer(staging_buffer.buffer_id))
            {
                warn!(
                    "Unable to send FreeBuffer({:?}) ({error})",
                    staging_buffer.buffer_id
                );
            };
        }
        if let Some(image_key) = self.image_key.take() {
            compositor_api.delete_image(image_key);
        }
    }

    /// Return the next [`Epoch`] to use and advance the internal counter.
    fn next_epoch(&mut self) -> Epoch {
        let epoch = self.next_epoch;
        self.next_epoch.next();
        epoch
    }

    /// If the given [`PresentationStagingBuffer`] is for a newer presentation, replace the existing
    /// one. The stale one is released via [`PresentationStagingBuffer::maybe_destroy`], which may
    /// return its staging buffer with [`Self::return_staging_buffer`].
    fn replace_presentation(&mut self, presentation: PresentationStagingBuffer) {
        let stale_presentation = if presentation.epoch >=
            self.presentation
                .as_ref()
                .map(|p| p.epoch)
                .unwrap_or_default()
        {
            self.presentation.replace(presentation)
        } else {
            Some(presentation)
        };
        if let Some(stale_presentation) = stale_presentation {
            stale_presentation.maybe_destroy(self);
        }
    }

    fn clear_presentation(&mut self) {
        if let Some(stale_presentation) = self.presentation.take() {
            stale_presentation.maybe_destroy(self);
        }
    }

    fn return_staging_buffer(&mut self, staging_buffer: StagingBuffer) {
        self.inactive_staging_buffers.push(staging_buffer)
    }
}

impl crate::WGPU {
    pub(crate) fn create_context(
        &self,
        context_id: WebGPUContextId,
        size: DeviceIntSize,
        buffer_ids: ArrayVec<id::BufferId, PRESENTATION_BUFFER_COUNT>,
    ) {
        let context_data = ContextData::new(&self.global, buffer_ids, size);
        assert!(
            self.wgpu_image_map
                .lock()
                .unwrap()
                .insert(context_id, context_data)
                .is_none(),
            "Context should be created only once!"
        );
    }

    pub(crate) fn set_image_key(&self, context_id: WebGPUContextId, image_key: ImageKey) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();

        if let Some(old_image_key) = context_data.image_key.replace(image_key) {
            self.compositor_api.delete_image(old_image_key);
        }

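        // Register the context's image in WebRender as an external, buffer-backed image;
        // the pixel data is provided at composition time by `WGPUExternalImages::lock`.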
        self.compositor_api.add_image(
            image_key,
            ImageDescriptor {
                format: ImageFormat::BGRA8,
                size: context_data.size,
                stride: None,
                offset: 0,
                flags: ImageDescriptorFlags::empty(),
            },
            SerializableImageData::External(image_data(context_id)),
        );
    }

    pub(crate) fn get_image(
        &self,
        context_id: WebGPUContextId,
        pending_texture: Option<PendingTexture>,
        sender: IpcSender<SharedSnapshot>,
    ) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();
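        // If script has drawn to a texture since the last snapshot, download it into a staging
        // buffer and reply with its contents; otherwise reply with the latest presented
        // snapshot (or an empty one if there is none).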
        if let Some(PendingTexture {
            texture_id,
            encoder_id,
            configuration,
        }) = pending_texture
        {
            let Some(staging_buffer) = context_data.get_or_make_available_buffer(&configuration)
            else {
                warn!("Failure obtaining available staging buffer");
                sender
                    .send(SharedSnapshot::cleared(configuration.size))
                    .unwrap();
                return;
            };

            let epoch = context_data.next_epoch();
            let wgpu_image_map = self.wgpu_image_map.clone();
            let sender = sender.clone();
            drop(webgpu_contexts);
            self.texture_download(
                texture_id,
                encoder_id,
                staging_buffer,
                configuration,
                move |staging_buffer| {
                    let mut webgpu_contexts = wgpu_image_map.lock().unwrap();
                    let context_data = webgpu_contexts.get_mut(&context_id).unwrap();
                    sender
                        .send(
                            staging_buffer
                                .snapshot()
                                .as_ref()
                                .map(Snapshot::to_shared)
                                .unwrap_or_else(|| SharedSnapshot::cleared(configuration.size)),
                        )
                        .unwrap();
                    if staging_buffer.is_mapped() {
                        context_data.replace_presentation(PresentationStagingBuffer::new(
                            epoch,
                            staging_buffer,
                        ));
                    } else {
                        // Mapping failed: return the staging buffer to the pool.
                        context_data.return_staging_buffer(staging_buffer);
                    }
                },
            );
        } else {
            sender
                .send(
                    context_data
                        .presentation
                        .as_ref()
                        .and_then(|presentation_staging_buffer| {
                            presentation_staging_buffer.staging_buffer.snapshot()
                        })
                        .unwrap_or_else(Snapshot::empty)
                        .to_shared(),
                )
                .unwrap();
        }
    }

    /// Read the texture to the staging buffer, map it to CPU memory, and update the
    /// image in WebRender when complete.
    pub(crate) fn present(
        &self,
        context_id: WebGPUContextId,
        pending_texture: Option<PendingTexture>,
        size: Size2D<u32>,
        canvas_epoch: Epoch,
    ) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();

        let Some(image_key) = context_data.image_key else {
            return;
        };

        let Some(PendingTexture {
            texture_id,
            encoder_id,
            configuration,
        }) = pending_texture
        else {
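            // No texture was produced for this frame: drop any stale presentation and still
            // push an image update so the compositor sees the new size and `canvas_epoch`.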
            context_data.clear_presentation();
            self.compositor_api.update_image(
                image_key,
                ImageDescriptor {
                    format: ImageFormat::BGRA8,
                    size: size.cast_unit().cast(),
                    stride: None,
                    offset: 0,
                    flags: ImageDescriptorFlags::empty(),
                },
                SerializableImageData::External(image_data(context_id)),
                Some(canvas_epoch),
            );
            return;
        };
        let Some(staging_buffer) = context_data.get_or_make_available_buffer(&configuration) else {
            warn!("Failure obtaining available staging buffer");
            context_data.clear_presentation();
            self.compositor_api.update_image(
                image_key,
                configuration.into(),
                SerializableImageData::External(image_data(context_id)),
                Some(canvas_epoch),
            );
            return;
        };
        let epoch = context_data.next_epoch();
        let wgpu_image_map = self.wgpu_image_map.clone();
        let compositor_api = self.compositor_api.clone();
        drop(webgpu_contexts);
        self.texture_download(
            texture_id,
            encoder_id,
            staging_buffer,
            configuration,
            move |staging_buffer| {
                let mut webgpu_contexts = wgpu_image_map.lock().unwrap();
                let context_data = webgpu_contexts.get_mut(&context_id).unwrap();
                if staging_buffer.is_mapped() {
                    context_data.replace_presentation(PresentationStagingBuffer::new(
                        epoch,
                        staging_buffer,
                    ));
                } else {
                    context_data.return_staging_buffer(staging_buffer);
                    context_data.clear_presentation();
                }
                // Update the image in WebRender.
                compositor_api.update_image(
                    image_key,
                    configuration.into(),
                    SerializableImageData::External(image_data(context_id)),
                    Some(canvas_epoch),
                );
            },
        );
    }

    /// Copies data from the provided texture using `encoder_id` to the provided [`StagingBuffer`].
    ///
    /// `callback` is guaranteed to be called. It receives the [`StagingBuffer`] in the
    /// [`StagingBufferState::Mapped`] state on success or [`StagingBufferState::Available`]
    /// on failure.
    fn texture_download(
        &self,
        texture_id: TextureId,
        encoder_id: CommandEncoderId,
        mut staging_buffer: StagingBuffer,
        config: ContextConfiguration,
        callback: impl FnOnce(StagingBuffer) + Send + 'static,
    ) {
        let Ok(command_buffer_id) =
            staging_buffer.prepare_load_texture_command_buffer(texture_id, encoder_id, &config)
        else {
            return callback(staging_buffer);
        };
        let StagingBufferState::Available(buffer) = &staging_buffer.state else {
            unreachable!("`prepare_load_texture_command_buffer` should make buffer available")
        };
        let buffer_id = staging_buffer.buffer_id;
        let buffer_size = buffer.size;
        {
            let _guard = self.poller.lock();
            let result = self
                .global
                .queue_submit(buffer.queue_id, &[command_buffer_id]);
            if result.is_err() {
                return callback(staging_buffer);
            }
        }
        staging_buffer.state = match staging_buffer.state {
            StagingBufferState::Available(buffer) => StagingBufferState::Mapping(buffer),
            _ => unreachable!("`prepare_load_texture_command_buffer` should make buffer available"),
        };
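        // Runs once the `buffer_map_async` below resolves: grab the mapped range and move to
        // `Mapped`, or fall back to `Available` on error, then hand the buffer to the caller.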
        let map_callback = {
            let token = self.poller.token();
            Box::new(move |result: Result<(), BufferAccessError>| {
                drop(token);
                staging_buffer.state = match staging_buffer.state {
                    StagingBufferState::Mapping(buffer) => {
                        if let Ok((data, len)) = result.and_then(|_| {
                            staging_buffer.global.buffer_get_mapped_range(
                                staging_buffer.buffer_id,
                                0,
                                Some(buffer.size),
                            )
                        }) {
                            StagingBufferState::Mapped(MappedBuffer {
                                buffer,
                                data,
                                len,
                                image_size: config.size,
                                image_format: config.format,
                                is_opaque: config.is_opaque,
                            })
                        } else {
                            StagingBufferState::Available(buffer)
                        }
                    },
                    _ => {
                        unreachable!("Mapping buffer should have StagingBufferState::Mapping state")
                    },
                };
                callback(staging_buffer);
            })
        };
        let map_op = BufferMapOperation {
            host: HostMap::Read,
            callback: Some(map_callback),
        };
        // error is handled by map_callback
        let _ = self
            .global
            .buffer_map_async(buffer_id, 0, Some(buffer_size), map_op);
        self.poller.wake();
    }

    pub(crate) fn destroy_context(&mut self, context_id: WebGPUContextId) {
        self.wgpu_image_map
            .lock()
            .unwrap()
            .remove(&context_id)
            .unwrap()
            .destroy(&self.script_sender, &self.compositor_api);
    }
}