1use std::collections::HashMap;
6use std::ptr::NonNull;
7use std::slice;
8use std::sync::{Arc, Mutex};
9
10use arrayvec::ArrayVec;
11use base::Epoch;
12use compositing_traits::{
13 CrossProcessCompositorApi, ExternalImageSource, SerializableImageData,
14 WebrenderExternalImageApi,
15};
16use euclid::default::Size2D;
17use ipc_channel::ipc::IpcSender;
18use log::{error, warn};
19use pixels::{IpcSnapshot, Snapshot, SnapshotAlphaMode, SnapshotPixelFormat};
20use serde::{Deserialize, Serialize};
21use webgpu_traits::{
22 ContextConfiguration, Error, PRESENTATION_BUFFER_COUNT, WebGPUContextId, WebGPUMsg,
23};
24use webrender_api::units::DeviceIntSize;
25use webrender_api::{
26 ExternalImageData, ExternalImageId, ExternalImageType, ImageDescriptor, ImageDescriptorFlags,
27 ImageFormat, ImageKey,
28};
29use wgpu_core::device::HostMap;
30use wgpu_core::global::Global;
31use wgpu_core::id;
32use wgpu_core::resource::{BufferAccessError, BufferMapOperation};
33
34use crate::wgt;
35
/// Image format used for a canvas context that has no explicit configuration.
const DEFAULT_IMAGE_FORMAT: ImageFormat = ImageFormat::RGBA8;

/// Shared, mutex-protected map from context id to its presentation state,
/// shared between the WGPU thread and WebRender's external image handler.
pub type WGPUImageMap = Arc<Mutex<HashMap<WebGPUContextId, ContextData>>>;

/// Monotonically increasing id of one presentation (one frame handed to the
/// compositor); used to discard out-of-order buffer-map callbacks.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
struct PresentationId(u64);
44
/// A presentation buffer that is currently mapped for reading: owns the
/// mapped pointer/length obtained from wgpu and unmaps the buffer on drop.
struct GPUPresentationBuffer {
    global: Arc<Global>,
    buffer_id: id::BufferId,
    // Pointer into the buffer's mapped range; valid until `buffer_unmap`
    // runs in `Drop`.
    data: NonNull<u8>,
    size: usize,
}

// SAFETY: `data` points into a MAP_READ-mapped range that stays valid until
// this value's `Drop` calls `buffer_unmap`, and it is only read through
// `slice()`. NOTE(review): this assumes wgpu-core permits no concurrent GPU
// writes to a buffer while it is mapped — confirm against wgpu-core's
// buffer-mapping validation.
unsafe impl Send for GPUPresentationBuffer {}
unsafe impl Sync for GPUPresentationBuffer {}
55
impl GPUPresentationBuffer {
    /// Takes ownership of the mapped range `[0, buffer_size)` of `buffer_id`.
    ///
    /// Panics if the buffer is not currently in the mapped state (callers
    /// invoke this from a resolved `buffer_map_async` callback).
    fn new(global: Arc<Global>, buffer_id: id::BufferId, buffer_size: u64) -> Self {
        let (data, size) = global
            .buffer_get_mapped_range(buffer_id, 0, Some(buffer_size))
            .unwrap();
        GPUPresentationBuffer {
            global,
            buffer_id,
            data,
            size: size as usize,
        }
    }

    /// Read-only view of the mapped bytes.
    fn slice(&self) -> &[u8] {
        // SAFETY: `data`/`size` come from `buffer_get_mapped_range` and the
        // mapping is held until `Drop`; the returned slice cannot outlive
        // `self`, so the pointer stays valid for the slice's lifetime.
        unsafe { slice::from_raw_parts(self.data.as_ptr(), self.size) }
    }
}
73
impl Drop for GPUPresentationBuffer {
    fn drop(&mut self) {
        // Unmap when the presentation is released; the result is ignored
        // deliberately (the buffer may already have been destroyed).
        let _ = self.global.buffer_unmap(self.buffer_id);
    }
}
79
/// WebRender external-image handler for WebGPU canvases: serves the pixels
/// of the most recently presented frame when the compositor locks an image.
#[derive(Default)]
pub struct WGPUExternalImages {
    pub images: WGPUImageMap,
    // Pixel copies kept alive between `lock` and `unlock`, keyed by context.
    pub locked_ids: HashMap<WebGPUContextId, Vec<u8>>,
}
85
86impl WebrenderExternalImageApi for WGPUExternalImages {
87 fn lock(&mut self, id: u64) -> (ExternalImageSource<'_>, Size2D<i32>) {
88 let id = WebGPUContextId(id);
89 let webgpu_contexts = self.images.lock().unwrap();
90 let context_data = webgpu_contexts.get(&id).unwrap();
91 let size = context_data.image_desc.size().cast_unit();
92 let data = if let Some(present_buffer) = context_data
93 .swap_chain
94 .as_ref()
95 .and_then(|swap_chain| swap_chain.data.as_ref())
96 {
97 present_buffer.slice().to_vec()
98 } else {
99 context_data.dummy_data()
100 };
101 self.locked_ids.insert(id, data);
102 (
103 ExternalImageSource::RawData(self.locked_ids.get(&id).unwrap().as_slice()),
104 size,
105 )
106 }
107
108 fn unlock(&mut self, id: u64) {
109 let id = WebGPUContextId(id);
110 self.locked_ids.remove(&id);
111 }
112}
113
/// Lifecycle state of a single presentation buffer id.
///
/// Transitions visible in this file:
/// `Unassigned`/`Available` -> `Mapping` (`get_available_buffer`),
/// `Mapping` -> `Mapped` or back to `Available` for stale frames
/// (`update_wr_image`), `Mapped` -> `Available` (`unmap_old_buffer`),
/// any state -> `Unassigned` (`destroy_swapchain`).
#[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
enum PresentationBufferState {
    /// No GPU buffer has been created for this id yet.
    #[default]
    Unassigned,
    /// Buffer exists and is free to receive the next frame.
    Available,
    /// A copy has been submitted and `buffer_map_async` is in flight.
    Mapping,
    /// Mapped and currently presented to the compositor.
    Mapped,
}
128
/// Per-configuration presentation state: the device/queue that render into
/// the context, plus the currently presented buffer, if any.
struct SwapChain {
    device_id: id::DeviceId,
    queue_id: id::QueueId,
    // `None` until the first successful present after (re)configuration.
    data: Option<GPUPresentationBuffer>,
}
134
/// An [`ImageDescriptor`] whose stride is always set (all constructors go
/// through `WebGPUImageDescriptor::new`, which computes a copy-aligned stride).
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct WebGPUImageDescriptor(pub ImageDescriptor);
137
138impl WebGPUImageDescriptor {
139 fn new(format: ImageFormat, size: DeviceIntSize, is_opaque: bool) -> Self {
140 let stride = ((size.width * format.bytes_per_pixel()) |
141 (wgt::COPY_BYTES_PER_ROW_ALIGNMENT as i32 - 1)) +
142 1;
143 Self(ImageDescriptor {
144 format,
145 size,
146 stride: Some(stride),
147 offset: 0,
148 flags: if is_opaque {
149 ImageDescriptorFlags::IS_OPAQUE
150 } else {
151 ImageDescriptorFlags::empty()
152 },
153 })
154 }
155
156 fn default(size: DeviceIntSize) -> Self {
157 Self::new(DEFAULT_IMAGE_FORMAT, size, false)
158 }
159
160 fn update(&mut self, new: Self) -> bool {
162 if self.0 != new.0 {
163 self.0 = new.0;
164 true
165 } else {
166 false
167 }
168 }
169
170 fn buffer_stride(&self) -> i32 {
171 self.0
172 .stride
173 .expect("Stride should be set by WebGPUImageDescriptor")
174 }
175
176 fn buffer_size(&self) -> wgt::BufferAddress {
177 (self.buffer_stride() * self.0.size.height) as wgt::BufferAddress
178 }
179
180 fn size(&self) -> DeviceIntSize {
181 self.0.size
182 }
183}
184
/// All compositor-facing state for one WebGPU canvas context.
pub struct ContextData {
    image_key: ImageKey,
    image_desc: WebGPUImageDescriptor,
    image_data: ExternalImageData,
    // Fixed pool of presentation buffer ids and their lifecycle state.
    buffer_ids: ArrayVec<(id::BufferId, PresentationBufferState), PRESENTATION_BUFFER_COUNT>,
    // `None` while the context is unconfigured (placeholder image shown).
    swap_chain: Option<SwapChain>,
    // Id to hand out for the next presentation.
    next_presentation_id: PresentationId,
    // Highest presentation id that has reached the compositor; map callbacks
    // carrying an older id are dropped as stale.
    current_presentation_id: PresentationId,
}
199
200impl ContextData {
201 fn new(
203 context_id: WebGPUContextId,
204 image_key: ImageKey,
205 size: DeviceIntSize,
206 buffer_ids: ArrayVec<id::BufferId, PRESENTATION_BUFFER_COUNT>,
207 ) -> Self {
208 let image_data = ExternalImageData {
209 id: ExternalImageId(context_id.0),
210 channel_index: 0,
211 image_type: ExternalImageType::Buffer,
212 normalized_uvs: false,
213 };
214
215 Self {
216 image_key,
217 image_desc: WebGPUImageDescriptor::default(size),
218 image_data,
219 swap_chain: None,
220 buffer_ids: buffer_ids
221 .iter()
222 .map(|&buffer_id| (buffer_id, PresentationBufferState::Unassigned))
223 .collect(),
224 current_presentation_id: PresentationId(0),
225 next_presentation_id: PresentationId(1),
226 }
227 }
228
229 fn dummy_data(&self) -> Vec<u8> {
230 vec![0; self.image_desc.buffer_size() as usize]
231 }
232
233 fn get_available_buffer(&'_ mut self, global: &Arc<Global>) -> Option<id::BufferId> {
236 assert!(self.swap_chain.is_some());
237 if let Some((buffer_id, buffer_state)) = self
238 .buffer_ids
239 .iter_mut()
240 .find(|(_, state)| *state == PresentationBufferState::Available)
241 {
242 *buffer_state = PresentationBufferState::Mapping;
243 Some(*buffer_id)
244 } else if let Some((buffer_id, buffer_state)) = self
245 .buffer_ids
246 .iter_mut()
247 .find(|(_, state)| *state == PresentationBufferState::Unassigned)
248 {
249 *buffer_state = PresentationBufferState::Mapping;
250 let buffer_id = *buffer_id;
251 let buffer_desc = wgt::BufferDescriptor {
252 label: None,
253 size: self.image_desc.buffer_size(),
254 usage: wgt::BufferUsages::MAP_READ | wgt::BufferUsages::COPY_DST,
255 mapped_at_creation: false,
256 };
257 let _ = global.device_create_buffer(
258 self.swap_chain.as_ref().unwrap().device_id,
259 &buffer_desc,
260 Some(buffer_id),
261 );
262 Some(buffer_id)
263 } else {
264 error!("No available presentation buffer: {:?}", self.buffer_ids);
265 None
266 }
267 }
268
269 fn get_buffer_state(&mut self, buffer_id: id::BufferId) -> &mut PresentationBufferState {
270 &mut self
271 .buffer_ids
272 .iter_mut()
273 .find(|(id, _)| *id == buffer_id)
274 .expect("Presentation buffer should have associated state")
275 .1
276 }
277
278 fn unmap_old_buffer(&mut self, presentation_buffer: GPUPresentationBuffer) {
279 assert!(self.swap_chain.is_some());
280 let buffer_state = self.get_buffer_state(presentation_buffer.buffer_id);
281 assert_eq!(*buffer_state, PresentationBufferState::Mapped);
282 *buffer_state = PresentationBufferState::Available;
283 drop(presentation_buffer);
284 }
285
286 fn destroy_swapchain(&mut self, global: &Arc<Global>) {
287 drop(self.swap_chain.take());
288 for (buffer_id, buffer_state) in &mut self.buffer_ids {
290 match buffer_state {
291 PresentationBufferState::Unassigned => {
292 },
294 _ => {
295 global.buffer_drop(*buffer_id);
296 },
297 }
298 *buffer_state = PresentationBufferState::Unassigned;
299 }
300 }
301
302 fn destroy(
303 mut self,
304 global: &Arc<Global>,
305 script_sender: &IpcSender<WebGPUMsg>,
306 compositor_api: &CrossProcessCompositorApi,
307 ) {
308 self.destroy_swapchain(global);
309 for (buffer_id, _) in self.buffer_ids {
310 if let Err(e) = script_sender.send(WebGPUMsg::FreeBuffer(buffer_id)) {
311 warn!("Unable to send FreeBuffer({:?}) ({:?})", buffer_id, e);
312 };
313 }
314 compositor_api.delete_image(self.image_key);
315 }
316
317 fn check_and_update_presentation_id(&mut self, presentation_id: PresentationId) -> bool {
319 if presentation_id > self.current_presentation_id {
320 self.current_presentation_id = presentation_id;
321 true
322 } else {
323 false
324 }
325 }
326
327 fn next_presentation_id(&mut self) -> PresentationId {
329 let res = PresentationId(self.next_presentation_id.0);
330 self.next_presentation_id.0 += 1;
331 res
332 }
333}
334
impl crate::WGPU {
    /// Registers a new WebGPU canvas context: publishes the backing external
    /// image to the compositor and records the context in the shared map.
    ///
    /// Panics if the context id was already registered.
    pub(crate) fn create_context(
        &self,
        context_id: WebGPUContextId,
        image_key: ImageKey,
        size: DeviceIntSize,
        buffer_ids: ArrayVec<id::BufferId, PRESENTATION_BUFFER_COUNT>,
    ) {
        let context_data = ContextData::new(context_id, image_key, size, buffer_ids);
        self.compositor_api.add_image(
            image_key,
            context_data.image_desc.0,
            SerializableImageData::External(context_data.image_data),
        );
        assert!(
            self.wgpu_image_map
                .lock()
                .unwrap()
                .insert(context_id, context_data)
                .is_none(),
            "Context should be created only once!"
        );
    }

    /// Snapshots the last presented frame of `context_id` for IPC transfer,
    /// or a cleared snapshot if nothing has been presented yet.
    ///
    /// Panics if the context does not exist, or if its format is neither
    /// RGBA8 nor BGRA8.
    pub(crate) fn get_image(&self, context_id: WebGPUContextId) -> IpcSnapshot {
        let webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get(&context_id).unwrap();
        let size = context_data.image_desc.size().cast().cast_unit();
        let data = if let Some(present_buffer) = context_data
            .swap_chain
            .as_ref()
            .and_then(|swap_chain| swap_chain.data.as_ref())
        {
            let format = match context_data.image_desc.0.format {
                ImageFormat::RGBA8 => SnapshotPixelFormat::RGBA,
                ImageFormat::BGRA8 => SnapshotPixelFormat::BGRA,
                _ => unimplemented!(),
            };
            // Alpha handling follows the descriptor's opaque flag (set from
            // the context configuration in `WebGPUImageDescriptor::new`).
            let alpha_mode = if context_data.image_desc.0.is_opaque() {
                SnapshotAlphaMode::AsOpaque {
                    premultiplied: false,
                }
            } else {
                SnapshotAlphaMode::Transparent {
                    premultiplied: true,
                }
            };
            Snapshot::from_vec(size, format, alpha_mode, present_buffer.slice().to_vec())
        } else {
            Snapshot::cleared(size)
        };
        data.as_ipc()
    }

    /// Resizes and/or reconfigures a context.
    ///
    /// With a configuration, the swapchain is rebuilt whenever the required
    /// buffer size changes (or none existed); without one, the swapchain is
    /// torn down and the image reverts to the default descriptor. The
    /// compositor image is only updated when the descriptor actually changed.
    pub(crate) fn update_context(
        &self,
        context_id: WebGPUContextId,
        size: DeviceIntSize,
        config: Option<ContextConfiguration>,
    ) {
        let mut webgpu_contexts = self.wgpu_image_map.lock().unwrap();
        let context_data = webgpu_contexts.get_mut(&context_id).unwrap();

        // Claim and immediately record a fresh presentation id so any
        // in-flight presents (older ids) are treated as stale when their
        // map callbacks resolve.
        let presentation_id = context_data.next_presentation_id();
        context_data.check_and_update_presentation_id(presentation_id);

        let needs_image_update = if let Some(config) = config {
            let new_image_desc =
                WebGPUImageDescriptor::new(config.format(), size, config.is_opaque);
            // Buffers are sized from the descriptor, so a different buffer
            // size forces recreating them.
            let needs_swapchain_rebuild = context_data.swap_chain.is_none() ||
                new_image_desc.buffer_size() != context_data.image_desc.buffer_size();
            if needs_swapchain_rebuild {
                context_data.destroy_swapchain(&self.global);
                context_data.swap_chain = Some(SwapChain {
                    device_id: config.device_id,
                    queue_id: config.queue_id,
                    data: None,
                });
            }
            context_data.image_desc.update(new_image_desc)
        } else {
            context_data.destroy_swapchain(&self.global);
            context_data
                .image_desc
                .update(WebGPUImageDescriptor::default(size))
        };

        if needs_image_update {
            self.compositor_api.update_image(
                context_data.image_key,
                context_data.image_desc.0,
                SerializableImageData::External(context_data.image_data),
                None,
            );
        }
    }

    /// Presents the contents of `texture_id` on the context's canvas.
    ///
    /// Copies the texture into a presentation buffer, submits the copy on
    /// the context's queue and asynchronously maps the buffer for reading;
    /// once the map resolves, `update_wr_image` publishes the pixels to the
    /// compositor (tagged with `canvas_epoch`). Returns `Ok(())` without
    /// doing anything if the context or its swapchain no longer exists.
    pub(crate) fn swapchain_present(
        &mut self,
        context_id: WebGPUContextId,
        encoder_id: id::Id<id::markers::CommandEncoder>,
        texture_id: id::Id<id::markers::Texture>,
        canvas_epoch: Option<Epoch>,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // Adapts wgpu's `(value, Option<error>)` convention to `Result`.
        fn err<T: std::error::Error + 'static>(e: Option<T>) -> Result<(), T> {
            if let Some(error) = e {
                Err(error)
            } else {
                Ok(())
            }
        }

        let global = &self.global;
        let device_id;
        let queue_id;
        let buffer_id;
        let image_desc;
        let presentation_id;
        // Scope the image-map lock: copy out only what the GPU work below
        // needs so the lock is not held across command submission.
        {
            if let Some(context_data) = self.wgpu_image_map.lock().unwrap().get_mut(&context_id) {
                let Some(swap_chain) = context_data.swap_chain.as_ref() else {
                    return Ok(());
                };
                device_id = swap_chain.device_id;
                queue_id = swap_chain.queue_id;
                buffer_id = context_data.get_available_buffer(global).unwrap();
                image_desc = context_data.image_desc;
                presentation_id = context_data.next_presentation_id();
            } else {
                return Ok(());
            }
        }
        let comm_desc = wgt::CommandEncoderDescriptor { label: None };
        let (encoder_id, error) =
            global.device_create_command_encoder(device_id, &comm_desc, Some(encoder_id));
        err(error)?;
        // Destination layout uses the descriptor's (copy-aligned) stride.
        let buffer_cv = wgt::TexelCopyBufferInfo {
            buffer: buffer_id,
            layout: wgt::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(image_desc.buffer_stride() as u32),
                rows_per_image: None,
            },
        };
        let texture_cv = wgt::TexelCopyTextureInfo {
            texture: texture_id,
            mip_level: 0,
            origin: wgt::Origin3d::ZERO,
            aspect: wgt::TextureAspect::All,
        };
        let copy_size = wgt::Extent3d {
            width: image_desc.size().width as u32,
            height: image_desc.size().height as u32,
            depth_or_array_layers: 1,
        };
        global.command_encoder_copy_texture_to_buffer(
            encoder_id,
            &texture_cv,
            &buffer_cv,
            &copy_size,
        )?;
        let (command_buffer_id, error) =
            global.command_encoder_finish(encoder_id, &wgt::CommandBufferDescriptor::default());
        err(error)?;
        {
            // NOTE(review): submission is guarded by the poller lock —
            // presumably to keep the poll thread from racing the submit;
            // confirm against the `Poller` implementation.
            let _guard = self.poller.lock();
            global
                .queue_submit(queue_id, &[command_buffer_id])
                .map_err(|(_, error)| Error::from_error(error))?;
        }
        let callback = {
            let global = Arc::clone(&self.global);
            let wgpu_image_map = Arc::clone(&self.wgpu_image_map);
            let compositor_api = self.compositor_api.clone();
            // `token` is dropped as the first action of the callback —
            // presumably it keeps the poller active while the map is in
            // flight; confirm in `Poller`.
            let token = self.poller.token();
            Box::new(move |result| {
                drop(token);
                update_wr_image(
                    result,
                    global,
                    buffer_id,
                    wgpu_image_map,
                    context_id,
                    compositor_api,
                    image_desc,
                    presentation_id,
                    canvas_epoch,
                );
            })
        };
        let map_op = BufferMapOperation {
            host: HostMap::Read,
            callback: Some(callback),
        };
        global.buffer_map_async(buffer_id, 0, Some(image_desc.buffer_size()), map_op)?;
        // Nudge the device poller so the map callback fires promptly.
        self.poller.wake();
        Ok(())
    }

    /// Removes the context and releases everything associated with it.
    ///
    /// Panics if the context does not exist.
    pub(crate) fn destroy_context(&mut self, context_id: WebGPUContextId) {
        self.wgpu_image_map
            .lock()
            .unwrap()
            .remove(&context_id)
            .unwrap()
            .destroy(&self.global, &self.script_sender, &self.compositor_api);
    }
}
546
/// Completion callback for the `buffer_map_async` issued by
/// `swapchain_present`: on success, publishes the newly mapped presentation
/// buffer to the compositor and recycles the previously presented one;
/// stale or failed presentations only release their buffer.
#[allow(clippy::too_many_arguments)]
fn update_wr_image(
    result: Result<(), BufferAccessError>,
    global: Arc<Global>,
    buffer_id: id::BufferId,
    wgpu_image_map: WGPUImageMap,
    context_id: WebGPUContextId,
    compositor_api: CrossProcessCompositorApi,
    image_desc: WebGPUImageDescriptor,
    presentation_id: PresentationId,
    canvas_epoch: Option<Epoch>,
) {
    match result {
        Ok(()) => {
            if let Some(context_data) = wgpu_image_map.lock().unwrap().get_mut(&context_id) {
                // A newer presentation (or a context update) already won the
                // race: instead of presenting, unmap the buffer and make it
                // reusable. The state check guards against the buffer having
                // been reset by a swapchain rebuild in the meantime.
                if !context_data.check_and_update_presentation_id(presentation_id) {
                    let buffer_state = context_data.get_buffer_state(buffer_id);
                    if *buffer_state == PresentationBufferState::Mapping {
                        let _ = global.buffer_unmap(buffer_id);
                        *buffer_state = PresentationBufferState::Available;
                    }
                    return;
                }
                // Winning the id check implies no context update happened
                // since this present started, so the descriptor must match.
                assert_eq!(image_desc, context_data.image_desc);
                let buffer_state = context_data.get_buffer_state(buffer_id);
                assert_eq!(*buffer_state, PresentationBufferState::Mapping);
                *buffer_state = PresentationBufferState::Mapped;
                let presentation_buffer =
                    GPUPresentationBuffer::new(global, buffer_id, image_desc.buffer_size());
                // Defensive: without a swapchain there is nowhere to present;
                // dropping `presentation_buffer` here unmaps the buffer.
                let Some(swap_chain) = context_data.swap_chain.as_mut() else {
                    return;
                };
                let old_presentation_buffer = swap_chain.data.replace(presentation_buffer);
                compositor_api.update_image(
                    context_data.image_key,
                    context_data.image_desc.0,
                    SerializableImageData::External(context_data.image_data),
                    canvas_epoch,
                );
                // Recycle the frame that was presented before this one.
                if let Some(old_presentation_buffer) = old_presentation_buffer {
                    context_data.unmap_old_buffer(old_presentation_buffer)
                }
            } else {
                error!("WebGPU Context {:?} is destroyed", context_id);
            }
        },
        _ => error!("Could not map buffer({:?})", buffer_id),
    }
}