use std::ptr::NonNull;
use std::sync::{Arc, Mutex};

use arrayvec::ArrayVec;
use base::Epoch;
use base::generic_channel::GenericSender;
use euclid::default::Size2D;
use log::warn;
use paint_api::{
    CrossProcessPaintApi, ExternalImageSource, SerializableImageData, WebRenderExternalImageApi,
};
use pixels::{SharedSnapshot, Snapshot, SnapshotAlphaMode, SnapshotPixelFormat};
use rustc_hash::FxHashMap;
use webgpu_traits::{
    ContextConfiguration, PRESENTATION_BUFFER_COUNT, PendingTexture, WebGPUContextId, WebGPUMsg,
};
use webrender_api::units::DeviceIntSize;
use webrender_api::{
    ExternalImageData, ExternalImageId, ExternalImageType, ImageDescriptor, ImageDescriptorFlags,
    ImageFormat, ImageKey,
};
use wgpu_core::device::HostMap;
use wgpu_core::global::Global;
use wgpu_core::id::{
    self, BufferId, CommandBufferId, CommandEncoderId, DeviceId, QueueId, TextureId,
};
use wgpu_core::resource::{
    BufferAccessError, BufferDescriptor, BufferMapOperation, CreateBufferError,
};
use wgpu_types::{
    BufferUsages, COPY_BYTES_PER_ROW_ALIGNMENT, CommandBufferDescriptor, CommandEncoderDescriptor,
    Extent3d, Origin3d, TexelCopyBufferInfo, TexelCopyBufferLayout, TexelCopyTextureInfo,
    TextureAspect,
};

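/// Shared, mutex-protected map from a [`WebGPUContextId`] to the presentation
/// state of its canvas. One handle lives on the WGPU thread and another inside
/// WebRender's external image handler ([`WebGpuExternalImages`]).
///
/// An illustrative sketch of how the two sides share the map (the setup code
/// below is hypothetical, not the actual embedder wiring):
///
/// ```ignore
/// let image_map: WebGpuExternalImageMap = Arc::new(Mutex::new(FxHashMap::default()));
/// // WebRender side: resolves a context's external image id to mapped pixels.
/// let handler = WebGpuExternalImages::new(image_map.clone());
/// // WGPU side: inserts and updates `ContextData` entries as canvases change.
/// ```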
pub type WebGpuExternalImageMap = Arc<Mutex<FxHashMap<WebGPUContextId, ContextData>>>;

const fn image_data(context_id: WebGPUContextId) -> ExternalImageData {
    ExternalImageData {
        id: ExternalImageId(context_id.0),
        channel_index: 0,
        image_type: ExternalImageType::Buffer,
        normalized_uvs: false,
    }
}

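/// The device, queue, and byte size a staging buffer was created for.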
#[derive(Clone, Copy, Debug)]
struct Buffer {
    device_id: DeviceId,
    queue_id: QueueId,
    size: u64,
}

impl Buffer {
    fn has_compatible_config(&self, config: &ContextConfiguration) -> bool {
        config.device_id == self.device_id && self.size == config.buffer_size()
    }
}

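/// A staging buffer that has been mapped into CPU memory, plus the image
/// layout (size, format, opacity) needed to interpret the mapped bytes.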
#[derive(Debug)]
struct MappedBuffer {
    buffer: Buffer,
    data: NonNull<u8>,
    len: u64,
    image_size: Size2D<u32>,
    image_format: ImageFormat,
    is_opaque: bool,
}

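// SAFETY (assumed by this design): the mapped range is only read, never
// written, and stays valid until the buffer is unmapped, so moving the
// pointer to another thread is sound here.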
unsafe impl Send for MappedBuffer {}

impl MappedBuffer {
    const fn slice(&'_ self) -> &'_ [u8] {
        // SAFETY: `data` and `len` come from `buffer_get_mapped_range` and
        // remain valid while the buffer stays mapped.
        unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len as usize) }
    }

    /// Bytes per row of the copied image, padded to `COPY_BYTES_PER_ROW_ALIGNMENT`.
    fn stride(&self) -> u32 {
        (self.image_size.width * self.image_format.bytes_per_pixel() as u32)
            .next_multiple_of(COPY_BYTES_PER_ROW_ALIGNMENT)
    }
}

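/// Lifecycle of a staging buffer: no GPU buffer assigned yet, created and
/// ready for a copy, an asynchronous map in flight, or mapped into CPU memory.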
#[derive(Debug)]
enum StagingBufferState {
    /// No GPU buffer has been created for this slot yet.
    Unassigned,
    /// A GPU buffer exists and can be used as a copy destination.
    Available(Buffer),
    /// An asynchronous map of the buffer is in progress.
    Mapping(Buffer),
    /// The buffer is mapped and its contents can be read on the CPU.
    Mapped(MappedBuffer),
}

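/// A readback buffer used to copy a canvas texture into CPU-visible memory so
/// that WebRender and [`Snapshot`]s can read its pixels.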
#[derive(Debug)]
struct StagingBuffer {
    global: Arc<Global>,
    buffer_id: BufferId,
    state: StagingBufferState,
}

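// SAFETY (assumed by this design): once mapped, a `StagingBuffer` is only ever
// read through shared references, so sharing it across threads is sound here.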
unsafe impl Sync for StagingBuffer {}

impl StagingBuffer {
    fn new(global: Arc<Global>, buffer_id: BufferId) -> Self {
        Self {
            global,
            buffer_id,
            state: StagingBufferState::Unassigned,
        }
    }

    const fn is_mapped(&self) -> bool {
        matches!(self.state, StagingBufferState::Mapped(..))
    }

    /// True if the buffer is in the `Available` state and matches `config`.
    fn is_available_and_has_compatible_config(&self, config: &ContextConfiguration) -> bool {
        let StagingBufferState::Available(buffer) = &self.state else {
            return false;
        };
        buffer.has_compatible_config(config)
    }

    /// True if the buffer is neither mapped nor being mapped, so it can be
    /// (re)assigned to a new configuration.
    const fn needs_assignment(&self) -> bool {
        matches!(
            self.state,
            StagingBufferState::Unassigned | StagingBufferState::Available(_)
        )
    }

    /// Ensure the GPU buffer exists, is unmapped, and matches `config`,
    /// recreating it if the device or size differs.
    fn ensure_available(&mut self, config: &ContextConfiguration) -> Result<(), CreateBufferError> {
        // Reuse the existing buffer when it matches the requested configuration;
        // otherwise drop it so a fresh buffer can be created below.
        let recreate = match &self.state {
            StagingBufferState::Unassigned => true,
            StagingBufferState::Available(buffer) |
            StagingBufferState::Mapping(buffer) |
            StagingBufferState::Mapped(MappedBuffer { buffer, .. }) => {
                if buffer.has_compatible_config(config) {
                    let _ = self.global.buffer_unmap(self.buffer_id);
                    false
                } else {
                    self.global.buffer_drop(self.buffer_id);
                    true
                }
            },
        };
        if recreate {
            let buffer_size = config.buffer_size();
            let (_, error) = self.global.device_create_buffer(
                config.device_id,
                &BufferDescriptor {
                    label: None,
                    size: buffer_size,
                    usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST,
                    mapped_at_creation: false,
                },
                Some(self.buffer_id),
            );
            if let Some(error) = error {
                return Err(error);
            };
            self.state = StagingBufferState::Available(Buffer {
                device_id: config.device_id,
                queue_id: config.queue_id,
                size: buffer_size,
            });
        }
        Ok(())
    }

    /// Record a command buffer that copies `texture_id` into this staging
    /// buffer, making the buffer available for `config` first.
    fn prepare_load_texture_command_buffer(
        &mut self,
        texture_id: TextureId,
        encoder_id: CommandEncoderId,
        config: &ContextConfiguration,
    ) -> Result<CommandBufferId, Box<dyn std::error::Error>> {
        self.ensure_available(config)?;
        let StagingBufferState::Available(buffer) = &self.state else {
            unreachable!("Should be made available by `ensure_available`")
        };
        let device_id = buffer.device_id;
        let command_descriptor = CommandEncoderDescriptor { label: None };
        let (encoder_id, error) = self.global.device_create_command_encoder(
            device_id,
            &command_descriptor,
            Some(encoder_id),
        );
        if let Some(error) = error {
            return Err(error.into());
        };
        let buffer_info = TexelCopyBufferInfo {
            buffer: self.buffer_id,
            layout: TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(config.stride()),
                rows_per_image: None,
            },
        };
        let texture_info = TexelCopyTextureInfo {
            texture: texture_id,
            mip_level: 0,
            origin: Origin3d::ZERO,
            aspect: TextureAspect::All,
        };
        let copy_size = Extent3d {
            width: config.size.width,
            height: config.size.height,
            depth_or_array_layers: 1,
        };
        self.global.command_encoder_copy_texture_to_buffer(
            encoder_id,
            &texture_info,
            &buffer_info,
            &copy_size,
        )?;
        let (command_buffer_id, error) = self
            .global
            .command_encoder_finish(encoder_id, &CommandBufferDescriptor::default());
        if let Some(error) = error {
            return Err(error.into());
        };
        Ok(command_buffer_id)
    }

    /// Unmap the underlying GPU buffer (if mapped or being mapped) and mark it
    /// available for reuse.
    fn unmap(&mut self) {
        match self.state {
            StagingBufferState::Unassigned | StagingBufferState::Available(_) => {},
            StagingBufferState::Mapping(buffer) |
            StagingBufferState::Mapped(MappedBuffer { buffer, .. }) => {
                let _ = self.global.buffer_unmap(self.buffer_id);
                self.state = StagingBufferState::Available(buffer)
            },
        }
    }

    /// Copy the mapped pixels into a [`Snapshot`], stripping the per-row
    /// padding. Returns `None` if the buffer is not mapped.
    fn snapshot(&self) -> Option<Snapshot> {
        let StagingBufferState::Mapped(mapped) = &self.state else {
            return None;
        };
        let format = match mapped.image_format {
            ImageFormat::RGBA8 => SnapshotPixelFormat::RGBA,
            ImageFormat::BGRA8 => SnapshotPixelFormat::BGRA,
            _ => unreachable!("GPUCanvasContext does not support other formats per spec"),
        };
        let alpha_mode = if mapped.is_opaque {
            SnapshotAlphaMode::AsOpaque {
                premultiplied: false,
            }
        } else {
            SnapshotAlphaMode::Transparent {
                premultiplied: true,
            }
        };
        let padded_byte_width = mapped.stride();
        let data = mapped.slice();
        let bytes_per_pixel = mapped.image_format.bytes_per_pixel() as usize;
        let mut result_unpadded =
            Vec::<u8>::with_capacity(mapped.image_size.area() as usize * bytes_per_pixel);
        // Copy each row without the padding required by the GPU copy alignment.
        for row in 0..mapped.image_size.height {
            let start = (row * padded_byte_width).try_into().ok()?;
            result_unpadded
                .extend(&data[start..start + mapped.image_size.width as usize * bytes_per_pixel]);
        }
        let mut snapshot =
            Snapshot::from_vec(mapped.image_size, format, alpha_mode, result_unpadded);
        if mapped.is_opaque {
            snapshot.transform(SnapshotAlphaMode::Opaque, snapshot.format())
        }
        Some(snapshot)
    }
}

impl Drop for StagingBuffer {
    fn drop(&mut self) {
        match self.state {
            StagingBufferState::Unassigned => {},
            StagingBufferState::Available(_) |
            StagingBufferState::Mapping(_) |
            StagingBufferState::Mapped(_) => {
                self.global.buffer_drop(self.buffer_id);
            },
        }
    }
}

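/// WebRender external image handler for WebGPU canvases: `lock` hands the
/// currently presented mapped buffer to WebRender as raw pixel data and
/// `unlock` releases it again.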
pub struct WebGpuExternalImages {
    pub image_map: WebGpuExternalImageMap,
    pub locked_ids: FxHashMap<WebGPUContextId, PresentationStagingBuffer>,
}

impl WebGpuExternalImages {
    pub fn new(image_map: WebGpuExternalImageMap) -> Self {
        Self {
            image_map,
            locked_ids: Default::default(),
        }
    }
}

impl WebRenderExternalImageApi for WebGpuExternalImages {
    fn lock(&mut self, id: u64) -> (ExternalImageSource<'_>, Size2D<i32>) {
        let id = WebGPUContextId(id);
        let presentation = {
            let mut webgpu_contexts = self.image_map.lock().unwrap();
            webgpu_contexts
                .get_mut(&id)
                .and_then(|context_data| context_data.presentation.clone())
        };
        let Some(presentation) = presentation else {
            return (ExternalImageSource::Invalid, Size2D::zero());
        };
        self.locked_ids.insert(id, presentation);
        let presentation = self.locked_ids.get(&id).unwrap();
        let StagingBufferState::Mapped(mapped_buffer) = &presentation.staging_buffer.state else {
            unreachable!("Presentation staging buffer should be mapped")
        };
        let size = mapped_buffer.image_size;
        (
            ExternalImageSource::RawData(mapped_buffer.slice()),
            size.cast().cast_unit(),
        )
    }

    fn unlock(&mut self, id: u64) {
        let id = WebGPUContextId(id);
        let Some(presentation) = self.locked_ids.remove(&id) else {
            return;
        };
        let mut webgpu_contexts = self.image_map.lock().unwrap();
        if let Some(context_data) = webgpu_contexts.get_mut(&id) {
            // The context is still alive: if this was the last reference,
            // return the staging buffer to the context's pool.
            presentation.maybe_destroy(context_data);
        } else {
            // The context has been destroyed in the meantime; just drop our
            // reference to the presentation.
            drop(presentation);
        }
    }
}

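/// The staging buffer currently presented to WebRender, tagged with the epoch
/// of the frame it holds. Clones share the same buffer; it is unmapped and
/// returned to the pool once the last clone is released.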
#[derive(Clone)]
pub struct PresentationStagingBuffer {
    epoch: Epoch,
    staging_buffer: Arc<StagingBuffer>,
}

impl PresentationStagingBuffer {
    fn new(epoch: Epoch, staging_buffer: StagingBuffer) -> Self {
        Self {
            epoch,
            staging_buffer: Arc::new(staging_buffer),
        }
    }

    /// If this is the last reference to the staging buffer, unmap it and
    /// return it to the context's pool; otherwise another holder (such as a
    /// locked WebRender image) is still using it.
    fn maybe_destroy(self, context_data: &mut ContextData) {
        if let Some(mut staging_buffer) = Arc::into_inner(self.staging_buffer) {
            staging_buffer.unmap();
            context_data.return_staging_buffer(staging_buffer);
        }
    }
}

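/// Presentation state of a single WebGPU canvas context: its WebRender image,
/// its size, a pool of inactive staging buffers, and the most recently
/// presented frame.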
pub struct ContextData {
    /// The WebRender image key for this context, if one has been assigned.
    image_key: Option<ImageKey>,
    /// The size of the canvas in device pixels.
    size: DeviceIntSize,
    /// Staging buffers that are not currently presented or being mapped.
    inactive_staging_buffers: ArrayVec<StagingBuffer, PRESENTATION_BUFFER_COUNT>,
    /// The most recently presented staging buffer, if any.
    presentation: Option<PresentationStagingBuffer>,
    /// The epoch to assign to the next frame produced for this context.
    next_epoch: Epoch,
}

impl ContextData {
    fn new(
        global: &Arc<Global>,
        buffer_ids: ArrayVec<id::BufferId, PRESENTATION_BUFFER_COUNT>,
        size: DeviceIntSize,
    ) -> Self {
        Self {
            image_key: None,
            size,
            inactive_staging_buffers: buffer_ids
                .iter()
                .map(|buffer_id| StagingBuffer::new(global.clone(), *buffer_id))
                .collect(),
            presentation: None,
            next_epoch: Epoch(1),
        }
    }

    /// Take an inactive staging buffer and make it available for `config`,
    /// preferring buffers that already match the configuration.
    fn get_or_make_available_buffer(
        &'_ mut self,
        config: &ContextConfiguration,
    ) -> Option<StagingBuffer> {
        self.inactive_staging_buffers
            .iter()
            .position(|staging_buffer| {
                // Prefer a buffer that is already available with a matching
                // configuration.
                staging_buffer.is_available_and_has_compatible_config(config)
            })
            .or_else(|| {
                // Otherwise take any buffer that is not mapped or being mapped.
                self.inactive_staging_buffers
                    .iter()
                    .position(|staging_buffer| staging_buffer.needs_assignment())
            })
            .or_else(|| {
                // Finally fall back to the first inactive buffer, if any.
                if self.inactive_staging_buffers.is_empty() {
                    None
                } else {
                    Some(0)
                }
            })
            .and_then(|index| {
                let mut staging_buffer = self.inactive_staging_buffers.remove(index);
                if staging_buffer.ensure_available(config).is_ok() {
                    Some(staging_buffer)
                } else {
                    // Creating the GPU buffer failed; keep the slot in the pool.
                    self.inactive_staging_buffers.push(staging_buffer);
                    None
                }
            })
    }

    /// Free all GPU buffers owned by this context and delete its WebRender
    /// image.
    fn destroy(
        mut self,
        script_sender: &GenericSender<WebGPUMsg>,
        paint_api: &CrossProcessPaintApi,
    ) {
        for staging_buffer in self.inactive_staging_buffers {
            if let Err(error) = script_sender.send(WebGPUMsg::FreeBuffer(staging_buffer.buffer_id))
            {
                warn!(
                    "Unable to send FreeBuffer({:?}) ({error})",
                    staging_buffer.buffer_id
                );
            };
        }
        if let Some(image_key) = self.image_key.take() {
            paint_api.delete_image(image_key);
        }
    }

    fn next_epoch(&mut self) -> Epoch {
        let epoch = self.next_epoch;
        self.next_epoch.next();
        epoch
    }

    /// Install `presentation` as the frame shown by WebRender, unless a newer
    /// presentation is already in place; the superseded presentation is
    /// unmapped and returned to the pool when possible.
    fn replace_presentation(&mut self, presentation: PresentationStagingBuffer) {
        let stale_presentation = if presentation.epoch >=
            self.presentation
                .as_ref()
                .map(|p| p.epoch)
                .unwrap_or_default()
        {
            self.presentation.replace(presentation)
        } else {
            Some(presentation)
        };
        if let Some(stale_presentation) = stale_presentation {
            stale_presentation.maybe_destroy(self);
        }
    }

    fn clear_presentation(&mut self) {
        if let Some(stale_presentation) = self.presentation.take() {
            stale_presentation.maybe_destroy(self);
        }
    }

    fn return_staging_buffer(&mut self, staging_buffer: StagingBuffer) {
        self.inactive_staging_buffers.push(staging_buffer)
    }
}

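// Presentation entry points running on the WGPU thread: context creation and
// teardown, WebRender image key management, snapshotting, and presenting.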
impl crate::WGPU {
    /// Create the presentation state for a new canvas context. The given
    /// buffer ids are reserved for this context's staging buffers.
    pub(crate) fn create_context(
        &self,
        context_id: WebGPUContextId,
        size: DeviceIntSize,
        buffer_ids: ArrayVec<id::BufferId, PRESENTATION_BUFFER_COUNT>,
    ) {
        let context_data = ContextData::new(&self.global, buffer_ids, size);
        assert!(
            self.wgpu_image_map
                .lock()
                .unwrap()
                .insert(context_id, context_data)
                .is_none(),
            "Context should be created only once!"
        );
    }

    /// Associate a WebRender image key with the context, registering the
    /// external image with the paint API.
    pub(crate) fn set_image_key(&self, context_id: WebGPUContextId, image_key: ImageKey) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();

        if let Some(old_image_key) = context_data.image_key.replace(image_key) {
            self.paint_api.delete_image(old_image_key);
        }

        self.paint_api.add_image(
            image_key,
            ImageDescriptor {
                format: ImageFormat::BGRA8,
                size: context_data.size,
                stride: None,
                offset: 0,
                flags: ImageDescriptorFlags::empty(),
            },
            SerializableImageData::External(image_data(context_id)),
            false,
        );
    }

    /// Send a snapshot of the context's current contents to `sender`. If a
    /// texture is pending it is downloaded first; otherwise the last presented
    /// frame (or an empty snapshot) is used.
    pub(crate) fn get_image(
        &self,
        context_id: WebGPUContextId,
        pending_texture: Option<PendingTexture>,
        sender: GenericSender<SharedSnapshot>,
    ) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();
        if let Some(PendingTexture {
            texture_id,
            encoder_id,
            configuration,
        }) = pending_texture
        {
            let Some(staging_buffer) = context_data.get_or_make_available_buffer(&configuration)
            else {
                warn!("Failure obtaining available staging buffer");
                sender
                    .send(SharedSnapshot::cleared(configuration.size))
                    .unwrap();
                return;
            };

            let epoch = context_data.next_epoch();
            let wgpu_image_map = self.wgpu_image_map.clone();
            let sender = sender.clone();
            drop(webgpu_contexts);
            self.texture_download(
                texture_id,
                encoder_id,
                staging_buffer,
                configuration,
                move |staging_buffer| {
                    let mut webgpu_contexts = wgpu_image_map.lock().unwrap();
                    let context_data = webgpu_contexts.get_mut(&context_id).unwrap();
                    sender
                        .send(
                            staging_buffer
                                .snapshot()
                                .as_ref()
                                .map(Snapshot::to_shared)
                                .unwrap_or_else(|| SharedSnapshot::cleared(configuration.size)),
                        )
                        .unwrap();
                    if staging_buffer.is_mapped() {
                        context_data.replace_presentation(PresentationStagingBuffer::new(
                            epoch,
                            staging_buffer,
                        ));
                    } else {
                        context_data.return_staging_buffer(staging_buffer);
                    }
                },
            );
        } else {
            sender
                .send(
                    context_data
                        .presentation
                        .as_ref()
                        .and_then(|presentation_staging_buffer| {
                            presentation_staging_buffer.staging_buffer.snapshot()
                        })
                        .unwrap_or_else(Snapshot::empty)
                        .to_shared(),
                )
                .unwrap();
        }
    }

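    /// Present the current frame of a canvas context: download the pending
    /// texture (if any) into a staging buffer, make that buffer the current
    /// presentation, and update the WebRender image for `canvas_epoch`.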
    pub(crate) fn present(
        &self,
        context_id: WebGPUContextId,
        pending_texture: Option<PendingTexture>,
        size: Size2D<u32>,
        canvas_epoch: Epoch,
    ) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();

        let Some(image_key) = context_data.image_key else {
            return;
        };

        // No pending texture: drop any previous presentation and push an
        // update for the (now empty) external image at the new size.
        let Some(PendingTexture {
            texture_id,
            encoder_id,
            configuration,
        }) = pending_texture
        else {
            context_data.clear_presentation();
            self.paint_api.update_image(
                image_key,
                ImageDescriptor {
                    format: ImageFormat::BGRA8,
                    size: size.cast_unit().cast(),
                    stride: None,
                    offset: 0,
                    flags: ImageDescriptorFlags::empty(),
                },
                SerializableImageData::External(image_data(context_id)),
                Some(canvas_epoch),
            );
            return;
        };
        let Some(staging_buffer) = context_data.get_or_make_available_buffer(&configuration) else {
            warn!("Failure obtaining available staging buffer");
            context_data.clear_presentation();
            self.paint_api.update_image(
                image_key,
                configuration.into(),
                SerializableImageData::External(image_data(context_id)),
                Some(canvas_epoch),
            );
            return;
        };
        let epoch = context_data.next_epoch();
        let wgpu_image_map = self.wgpu_image_map.clone();
        let paint_api = self.paint_api.clone();
        drop(webgpu_contexts);
        self.texture_download(
            texture_id,
            encoder_id,
            staging_buffer,
            configuration,
            move |staging_buffer| {
                let mut webgpu_contexts = wgpu_image_map.lock().unwrap();
                let context_data = webgpu_contexts.get_mut(&context_id).unwrap();
                if staging_buffer.is_mapped() {
                    context_data.replace_presentation(PresentationStagingBuffer::new(
                        epoch,
                        staging_buffer,
                    ));
                } else {
                    context_data.return_staging_buffer(staging_buffer);
                    context_data.clear_presentation();
                }
                // Update the WebRender image in all cases so the compositor
                // sees this canvas epoch.
                paint_api.update_image(
                    image_key,
                    configuration.into(),
                    SerializableImageData::External(image_data(context_id)),
                    Some(canvas_epoch),
                );
            },
        );
    }

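    /// Copy `texture_id` into `staging_buffer` and asynchronously map the
    /// result, then hand the buffer to `callback` (mapped on success,
    /// unmapped on failure).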
    fn texture_download(
        &self,
        texture_id: TextureId,
        encoder_id: CommandEncoderId,
        mut staging_buffer: StagingBuffer,
        config: ContextConfiguration,
        callback: impl FnOnce(StagingBuffer) + Send + 'static,
    ) {
        let Ok(command_buffer_id) =
            staging_buffer.prepare_load_texture_command_buffer(texture_id, encoder_id, &config)
        else {
            return callback(staging_buffer);
        };
        let StagingBufferState::Available(buffer) = &staging_buffer.state else {
            unreachable!("`prepare_load_texture_command_buffer` should make buffer available")
        };
        let buffer_id = staging_buffer.buffer_id;
        let buffer_size = buffer.size;
        // Submit the copy while holding the poller lock.
        {
            let _guard = self.poller.lock();
            let result = self
                .global
                .queue_submit(buffer.queue_id, &[command_buffer_id]);
            if result.is_err() {
                return callback(staging_buffer);
            }
        }
        // The copy is submitted; transition to `Mapping` before requesting the
        // asynchronous map.
        staging_buffer.state = match staging_buffer.state {
            StagingBufferState::Available(buffer) => StagingBufferState::Mapping(buffer),
            _ => unreachable!("`prepare_load_texture_command_buffer` should make buffer available"),
        };
        let map_callback = {
            let token = self.poller.token();
            Box::new(move |result: Result<(), BufferAccessError>| {
                drop(token);
                staging_buffer.state = match staging_buffer.state {
                    StagingBufferState::Mapping(buffer) => {
                        if let Ok((data, len)) = result.and_then(|_| {
                            staging_buffer.global.buffer_get_mapped_range(
                                staging_buffer.buffer_id,
                                0,
                                Some(buffer.size),
                            )
                        }) {
                            StagingBufferState::Mapped(MappedBuffer {
                                buffer,
                                data,
                                len,
                                image_size: config.size,
                                image_format: config.format,
                                is_opaque: config.is_opaque,
                            })
                        } else {
                            StagingBufferState::Available(buffer)
                        }
                    },
                    _ => {
                        unreachable!("Mapping buffer should have StagingBufferState::Mapping state")
                    },
                };
                callback(staging_buffer);
            })
        };
        let map_op = BufferMapOperation {
            host: HostMap::Read,
            callback: Some(map_callback),
        };
        // Request the asynchronous map; `map_callback` runs once it completes.
        let _ = self
            .global
            .buffer_map_async(buffer_id, 0, Some(buffer_size), map_op);
        self.poller.wake();
    }

    /// Remove a context's presentation state, freeing its staging buffers and
    /// deleting its WebRender image.
    pub(crate) fn destroy_context(&mut self, context_id: WebGPUContextId) {
        self.wgpu_image_map
            .lock()
            .unwrap()
            .remove(&context_id)
            .unwrap()
            .destroy(&self.script_sender, &self.paint_api);
    }
}