wgpu_core/device/resource.rs

use alloc::{
    borrow::Cow,
    boxed::Box,
    string::{String, ToString as _},
    sync::{Arc, Weak},
    vec::Vec,
};
use core::{
    fmt,
    mem::{self, ManuallyDrop},
    num::NonZeroU32,
    sync::atomic::{AtomicBool, Ordering},
};

use arrayvec::ArrayVec;
use bitflags::Flags;
use smallvec::SmallVec;
use wgt::{
    math::align_to, DeviceLostReason, TextureFormat, TextureSampleType, TextureSelector,
    TextureViewDimension,
};

#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
    binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError},
    command, conv,
    device::{
        bgl, create_validator, life::WaitIdleError, map_buffer, AttachmentData,
        DeviceLostInvocation, HostMap, MissingDownlevelFlags, MissingFeatures, RenderPassContext,
        CLEANUP_WAIT_MS,
    },
    hal_label,
    init_tracker::{
        BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
        TextureInitTrackerAction,
    },
    instance::{Adapter, RequestDeviceError},
    lock::{rank, Mutex, RwLock},
    pipeline,
    pool::ResourcePool,
    resource::{
        self, Buffer, Fallible, Labeled, ParentDevice, QuerySet, RawResourceAccess, Sampler,
        StagingBuffer, Texture, TextureView, TextureViewNotRenderableReason, Tlas, TrackingData,
    },
    resource_log,
    snatch::{SnatchGuard, SnatchLock, Snatchable},
    timestamp_normalization::TIMESTAMP_NORMALIZATION_BUFFER_USES,
    track::{BindGroupStates, DeviceTracker, TrackerIndexAllocators, UsageScope, UsageScopePool},
    validation::{self, validate_color_attachment_bytes_per_sample},
    weak_vec::WeakVec,
    FastHashMap, LabelHelpers, OnceCellOrLock,
};

use super::{
    queue::Queue, DeviceDescriptor, DeviceError, DeviceLostClosure, UserClosures,
    ENTRYPOINT_FAILURE_ERROR, ZERO_BUFFER_SIZE,
};

#[cfg(supports_64bit_atomics)]
use core::sync::atomic::AtomicU64;
#[cfg(not(supports_64bit_atomics))]
use portable_atomic::AtomicU64;

pub(crate) struct CommandIndices {
    /// The index of the last command submission that was attempted.
    ///
    /// Note that `fence` may never be signalled with this value, if the command
    /// submission failed. If you need to wait for everything running on a
    /// `Queue` to complete, wait for [`last_successful_submission_index`].
    ///
    /// [`last_successful_submission_index`]: Device::last_successful_submission_index
    pub(crate) active_submission_index: hal::FenceValue,
    pub(crate) next_acceleration_structure_build_command_index: u64,
}

/// Structure describing a logical device. Some members are internally mutable,
/// stored behind mutexes.
pub struct Device {
    raw: Box<dyn hal::DynDevice>,
    pub(crate) adapter: Arc<Adapter>,
    pub(crate) queue: OnceCellOrLock<Weak<Queue>>,
    pub(crate) zero_buffer: ManuallyDrop<Box<dyn hal::DynBuffer>>,
    /// The `label` from the descriptor used to create the resource.
    label: String,

    pub(crate) command_allocator: command::CommandAllocator,

    pub(crate) command_indices: RwLock<CommandIndices>,

    /// The index of the last successful submission to this device's
    /// [`hal::Queue`].
    ///
    /// Unlike [`active_submission_index`], which is incremented each time
    /// submission is attempted, this is updated only when submission succeeds,
    /// so waiting for this value won't hang waiting for work that was never
    /// submitted.
    ///
    /// [`active_submission_index`]: CommandIndices::active_submission_index
    pub(crate) last_successful_submission_index: hal::AtomicFenceValue,

    // NOTE: if both are needed, the `snatchable_lock` must be consistently acquired before the
    // `fence` lock to avoid deadlocks.
    pub(crate) fence: RwLock<ManuallyDrop<Box<dyn hal::DynFence>>>,
    pub(crate) snatchable_lock: SnatchLock,

    /// Is this device valid? Valid is closely associated with "lose the device",
    /// which can be triggered by various methods, including at the end of device
    /// destroy, and by any GPU errors that cause us to no longer trust the state
    /// of the device. Ideally we would like to fold valid into the storage of
    /// the device itself (for example as an Error enum), but unfortunately we
    /// need to continue to be able to retrieve the device in poll_devices to
    /// determine if it can be dropped. If our internal accesses of devices were
    /// done through ref-counted references and external accesses checked for
    /// Error enums, we wouldn't need this. For now, we need it. All the call
    /// sites where we check it are areas that should be revisited if we start
    /// using ref-counted references for internal access.
    pub(crate) valid: AtomicBool,

    /// Closure to be called on "lose the device". This is invoked directly by
    /// device.lose or by the UserCallbacks returned from maintain when the device
    /// has been destroyed and its queues are empty.
    pub(crate) device_lost_closure: Mutex<Option<DeviceLostClosure>>,

    /// Stores the state of buffers and textures.
    pub(crate) trackers: Mutex<DeviceTracker>,
    pub(crate) tracker_indices: TrackerIndexAllocators,
    /// Pool of bind group layouts, allowing deduplication.
    pub(crate) bgl_pool: ResourcePool<bgl::EntryMap, BindGroupLayout>,
    pub(crate) alignments: hal::Alignments,
    pub(crate) limits: wgt::Limits,
    pub(crate) features: wgt::Features,
    pub(crate) downlevel: wgt::DownlevelCapabilities,
    pub(crate) instance_flags: wgt::InstanceFlags,
    pub(crate) deferred_destroy: Mutex<Vec<DeferredDestroy>>,
    pub(crate) usage_scopes: UsageScopePool,
    pub(crate) indirect_validation: Option<crate::indirect_validation::IndirectValidation>,
    // Optional so that we can late-initialize this after the queue is created.
    pub(crate) timestamp_normalizer:
        OnceCellOrLock<crate::timestamp_normalization::TimestampNormalizer>,
    // needs to be dropped last
    #[cfg(feature = "trace")]
    pub(crate) trace: Mutex<Option<trace::Trace>>,
}

pub(crate) enum DeferredDestroy {
    TextureViews(WeakVec<TextureView>),
    BindGroups(WeakVec<BindGroup>),
}

impl fmt::Debug for Device {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Device")
            .field("label", &self.label())
            .field("limits", &self.limits)
            .field("features", &self.features)
            .field("downlevel", &self.downlevel)
            .finish()
    }
}

impl Drop for Device {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        // SAFETY: We are in the Drop impl and we don't use self.zero_buffer anymore after this point.
        let zero_buffer = unsafe { ManuallyDrop::take(&mut self.zero_buffer) };
        // SAFETY: We are in the Drop impl and we don't use self.fence anymore after this point.
        let fence = unsafe { ManuallyDrop::take(&mut self.fence.write()) };
        if let Some(indirect_validation) = self.indirect_validation.take() {
            indirect_validation.dispose(self.raw.as_ref());
        }
        if let Some(timestamp_normalizer) = self.timestamp_normalizer.take() {
            timestamp_normalizer.dispose(self.raw.as_ref());
        }
        unsafe {
            self.raw.destroy_buffer(zero_buffer);
            self.raw.destroy_fence(fence);
        }
    }
}

impl Device {
    pub(crate) fn raw(&self) -> &dyn hal::DynDevice {
        self.raw.as_ref()
    }
    pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
        if self.features.contains(feature) {
            Ok(())
        } else {
            Err(MissingFeatures(feature))
        }
    }

    pub(crate) fn require_downlevel_flags(
        &self,
        flags: wgt::DownlevelFlags,
    ) -> Result<(), MissingDownlevelFlags> {
        if self.downlevel.flags.contains(flags) {
            Ok(())
        } else {
            Err(MissingDownlevelFlags(flags))
        }
    }
}

impl Device {
    pub(crate) fn new(
        raw_device: Box<dyn hal::DynDevice>,
        adapter: &Arc<Adapter>,
        desc: &DeviceDescriptor,
        instance_flags: wgt::InstanceFlags,
    ) -> Result<Self, DeviceError> {
        #[cfg(not(feature = "trace"))]
        match &desc.trace {
            wgt::Trace::Off => {}
            _ => {
                log::error!("wgpu-core feature 'trace' is not enabled");
            }
        };
        #[cfg(feature = "trace")]
        let trace_dir_name: Option<&std::path::PathBuf> = match &desc.trace {
            wgt::Trace::Off => None,
            wgt::Trace::Directory(d) => Some(d),
            // The enum is non_exhaustive, so we must have a fallback arm (that should be
            // unreachable in practice).
            t => {
                log::error!("unimplemented wgpu_types::Trace variant {t:?}");
                None
            }
        };

        let fence = unsafe { raw_device.create_fence() }.map_err(DeviceError::from_hal)?;

        let command_allocator = command::CommandAllocator::new();

        let rt_uses = if desc
            .required_features
            .contains(wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)
        {
            wgt::BufferUses::TOP_LEVEL_ACCELERATION_STRUCTURE_INPUT
        } else {
            wgt::BufferUses::empty()
        };

        // Create zeroed buffer used for texture clears (and raytracing if required).
        let zero_buffer = unsafe {
            raw_device.create_buffer(&hal::BufferDescriptor {
                label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags),
                size: ZERO_BUFFER_SIZE,
                usage: wgt::BufferUses::COPY_SRC | wgt::BufferUses::COPY_DST | rt_uses,
                memory_flags: hal::MemoryFlags::empty(),
            })
        }
        .map_err(DeviceError::from_hal)?;

        let alignments = adapter.raw.capabilities.alignments.clone();
        let downlevel = adapter.raw.capabilities.downlevel.clone();

        let enable_indirect_validation = instance_flags
            .contains(wgt::InstanceFlags::VALIDATION_INDIRECT_CALL)
            && downlevel
                .flags
                .contains(wgt::DownlevelFlags::INDIRECT_EXECUTION);

        let indirect_validation = if enable_indirect_validation {
            Some(crate::indirect_validation::IndirectValidation::new(
                raw_device.as_ref(),
                &desc.required_limits,
                &desc.required_features,
                adapter.backend(),
            )?)
        } else {
            None
        };

        Ok(Self {
            raw: raw_device,
            adapter: adapter.clone(),
            queue: OnceCellOrLock::new(),
            zero_buffer: ManuallyDrop::new(zero_buffer),
            label: desc.label.to_string(),
            command_allocator,
            command_indices: RwLock::new(
                rank::DEVICE_COMMAND_INDICES,
                CommandIndices {
                    active_submission_index: 0,
                    // By starting at one, we can put the result in a NonZeroU64.
                    next_acceleration_structure_build_command_index: 1,
                },
            ),
            last_successful_submission_index: AtomicU64::new(0),
            fence: RwLock::new(rank::DEVICE_FENCE, ManuallyDrop::new(fence)),
            snatchable_lock: unsafe { SnatchLock::new(rank::DEVICE_SNATCHABLE_LOCK) },
            valid: AtomicBool::new(true),
            device_lost_closure: Mutex::new(rank::DEVICE_LOST_CLOSURE, None),
            trackers: Mutex::new(rank::DEVICE_TRACKERS, DeviceTracker::new()),
            tracker_indices: TrackerIndexAllocators::new(),
            bgl_pool: ResourcePool::new(),
            #[cfg(feature = "trace")]
            trace: Mutex::new(
                rank::DEVICE_TRACE,
                trace_dir_name.and_then(|path| match trace::Trace::new(path.clone()) {
                    Ok(mut trace) => {
                        trace.add(trace::Action::Init {
                            desc: wgt::DeviceDescriptor {
                                trace: wgt::Trace::Off,
                                ..desc.clone()
                            },
                            backend: adapter.backend(),
                        });
                        Some(trace)
                    }
                    Err(e) => {
                        log::error!("Unable to start a trace in '{path:?}': {e}");
                        None
                    }
                }),
            ),
            alignments,
            limits: desc.required_limits.clone(),
            features: desc.required_features,
            downlevel,
            instance_flags,
            deferred_destroy: Mutex::new(rank::DEVICE_DEFERRED_DESTROY, Vec::new()),
            usage_scopes: Mutex::new(rank::DEVICE_USAGE_SCOPES, Default::default()),
            timestamp_normalizer: OnceCellOrLock::new(),
            indirect_validation,
        })
    }

    pub fn late_init_resources_with_queue(&self) -> Result<(), RequestDeviceError> {
        let queue = self.get_queue().unwrap();

        let timestamp_normalizer = crate::timestamp_normalization::TimestampNormalizer::new(
            self,
            queue.get_timestamp_period(),
        )?;

        self.timestamp_normalizer
            .set(timestamp_normalizer)
            .unwrap_or_else(|_| panic!("Called late_init_resources_with_queue twice"));

        Ok(())
    }

    /// Returns the backend this device is using.
    pub fn backend(&self) -> wgt::Backend {
        self.adapter.backend()
    }

    pub fn is_valid(&self) -> bool {
        self.valid.load(Ordering::Acquire)
    }

    pub fn check_is_valid(&self) -> Result<(), DeviceError> {
        if self.is_valid() {
            Ok(())
        } else {
            Err(DeviceError::Lost)
        }
    }

    /// Checks that we are operating within the memory budget reported by the native APIs.
    ///
    /// If we are not, the device gets invalidated.
    ///
    /// The budget might fluctuate over the lifetime of the application, so it should be checked
    /// somewhat frequently.
    pub fn lose_if_oom(&self) {
        let _ = self
            .raw()
            .check_if_oom()
            .map_err(|e| self.handle_hal_error(e));
    }

    pub fn handle_hal_error(&self, error: hal::DeviceError) -> DeviceError {
        match error {
            hal::DeviceError::OutOfMemory
            | hal::DeviceError::Lost
            | hal::DeviceError::Unexpected => {
                self.lose(&error.to_string());
            }
        }
        DeviceError::from_hal(error)
    }

    pub fn handle_hal_error_with_nonfatal_oom(&self, error: hal::DeviceError) -> DeviceError {
        match error {
            hal::DeviceError::OutOfMemory => DeviceError::from_hal(error),
            error => self.handle_hal_error(error),
        }
    }

    /// Run some destroy operations that were deferred.
    ///
    /// Destroying the resources requires taking a write lock on the device's snatch lock,
    /// so a good reason for deferring resource destruction is when we don't know for sure
    /// how risky it is to take the lock (typically, it shouldn't be taken from the drop
    /// implementation of a reference-counted structure).
    /// The snatch lock must not be held while this function is called.
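    ///
    /// A minimal sketch of the deferring side (an assumed call pattern, not the
    /// exact code used by the drop implementations):
    ///
    /// ```ignore
    /// // Instead of snatching the raw resource during drop (which would need the
    /// // snatch lock), stash weak references and let the device clean them up later.
    /// device
    ///     .deferred_destroy
    ///     .lock()
    ///     .push(DeferredDestroy::BindGroups(weak_bind_groups));
    /// ```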
    pub(crate) fn deferred_resource_destruction(&self) {
        let deferred_destroy = mem::take(&mut *self.deferred_destroy.lock());
        for item in deferred_destroy {
            match item {
                DeferredDestroy::TextureViews(views) => {
                    for view in views {
                        let Some(view) = view.upgrade() else {
                            continue;
                        };
                        let Some(raw_view) = view.raw.snatch(&mut self.snatchable_lock.write())
                        else {
                            continue;
                        };

                        resource_log!("Destroy raw {}", view.error_ident());

                        unsafe {
                            self.raw().destroy_texture_view(raw_view);
                        }
                    }
                }
                DeferredDestroy::BindGroups(bind_groups) => {
                    for bind_group in bind_groups {
                        let Some(bind_group) = bind_group.upgrade() else {
                            continue;
                        };
                        let Some(raw_bind_group) =
                            bind_group.raw.snatch(&mut self.snatchable_lock.write())
                        else {
                            continue;
                        };

                        resource_log!("Destroy raw {}", bind_group.error_ident());

                        unsafe {
                            self.raw().destroy_bind_group(raw_bind_group);
                        }
                    }
                }
            }
        }
    }

    pub fn get_queue(&self) -> Option<Arc<Queue>> {
        self.queue.get().as_ref()?.upgrade()
    }

    pub fn set_queue(&self, queue: &Arc<Queue>) {
        assert!(self.queue.set(Arc::downgrade(queue)).is_ok());
    }

    /// Check the current status of the GPU and process any submissions that have
    /// finished.
    ///
    /// The `poll_type` argument tells whether this function should wait for a particular
    /// submission index to complete, or just poll the current status.
    ///
    /// This will process _all_ completed submissions, even if the caller only asked
    /// us to poll to a given submission index.
    ///
    /// Returns a pair `(closures, result)`, where:
    ///
    /// - `closures` is a list of callbacks that need to be invoked to inform the user
    ///   about various things that occurred. These must be handled even if this
    ///   function returns an error, hence they are outside of the result.
    ///
    /// - `result` is the outcome of the poll or wait operation, including whether
    ///   there was a timeout or a validation error.
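    ///
    /// A minimal usage sketch (an assumed caller, not code from this crate), with
    /// the locks acquired in the documented order (snatch lock before fence):
    ///
    /// ```ignore
    /// let snatch_guard = device.snatchable_lock.read();
    /// let fence = device.fence.read();
    /// let (closures, status) = device.maintain(fence, wgt::PollType::Poll, snatch_guard);
    /// // Fire the user callbacks regardless of whether `status` is an error.
    /// closures.fire();
    /// ```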
    pub(crate) fn maintain<'this>(
        &'this self,
        fence: crate::lock::RwLockReadGuard<ManuallyDrop<Box<dyn hal::DynFence>>>,
        poll_type: wgt::PollType<crate::SubmissionIndex>,
        snatch_guard: SnatchGuard,
    ) -> (UserClosures, Result<wgt::PollStatus, WaitIdleError>) {
        profiling::scope!("Device::maintain");

        let mut user_closures = UserClosures::default();

        // If a wait was requested, determine which submission index to wait for.
        let wait_submission_index = match poll_type {
            wgt::PollType::WaitForSubmissionIndex(submission_index) => {
                let last_successful_submission_index = self
                    .last_successful_submission_index
                    .load(Ordering::Acquire);

                if submission_index > last_successful_submission_index {
                    let result = Err(WaitIdleError::WrongSubmissionIndex(
                        submission_index,
                        last_successful_submission_index,
                    ));

                    return (user_closures, result);
                }

                Some(submission_index)
            }
            wgt::PollType::Wait => Some(
                self.last_successful_submission_index
                    .load(Ordering::Acquire),
            ),
            wgt::PollType::Poll => None,
        };

        // Wait for the submission index if requested.
        if let Some(target_submission_index) = wait_submission_index {
            log::trace!("Device::maintain: waiting for submission index {target_submission_index}");

            let wait_result = unsafe {
                self.raw()
                    .wait(fence.as_ref(), target_submission_index, CLEANUP_WAIT_MS)
            };

            // This error match is only about `DeviceError`s. At this stage we do not care
            // whether the wait succeeded or not, and the `Ok(bool)` variant is ignored.
            if let Err(e) = wait_result {
                let hal_error: WaitIdleError = self.handle_hal_error(e).into();
                return (user_closures, Err(hal_error));
            }
        }

        // Get the currently finished submission index. This may be higher than the requested
        // wait, or it may be less than the requested wait if the wait failed.
        let fence_value_result = unsafe { self.raw().get_fence_value(fence.as_ref()) };
        let current_finished_submission = match fence_value_result {
            Ok(fence_value) => fence_value,
            Err(e) => {
                let hal_error: WaitIdleError = self.handle_hal_error(e).into();
                return (user_closures, Err(hal_error));
            }
        };

        // Maintain all finished submissions on the queue, updating the relevant user
        // closures and recording whether the queue is now empty.
        //
        // We don't use the result of the wait here, as we want to progress forward as far as possible
        // and the wait could have been for submissions that finished long ago.
        let mut queue_empty = false;
        if let Some(queue) = self.get_queue() {
            let queue_result = queue.maintain(current_finished_submission, &snatch_guard);
            (
                user_closures.submissions,
                user_closures.mappings,
                user_closures.blas_compact_ready,
                queue_empty,
            ) = queue_result
        };

        // Based on the queue empty status, and the current finished submission index, determine the result of the poll.
        let result = if queue_empty {
            if let Some(wait_submission_index) = wait_submission_index {
                // Assert to ensure that if we received a queue empty status, the fence shows the correct value.
                // This is defensive, as this should never be hit.
                assert!(
                    current_finished_submission >= wait_submission_index,
                    "If the queue is empty, the current submission index ({}) should be at least the wait submission index ({})",
                    current_finished_submission,
                    wait_submission_index
                );
            }

            Ok(wgt::PollStatus::QueueEmpty)
        } else if let Some(wait_submission_index) = wait_submission_index {
            // This can succeed even when the wait itself timed out: submissions may have
            // finished in the window between the timeout resolving, this thread being
            // scheduled again, and us reading the fence value.
            if current_finished_submission >= wait_submission_index {
                Ok(wgt::PollStatus::WaitSucceeded)
            } else {
                Err(WaitIdleError::Timeout)
            }
        } else {
            Ok(wgt::PollStatus::Poll)
        };

        // Detect if we have been destroyed and now need to lose the device.
        //
        // If we are invalid (set at start of destroy) and our queue is empty,
        // and we have a DeviceLostClosure, return the closure to be called by
        // our caller. This will complete the steps for both destroy and for
        // "lose the device".
        let mut should_release_gpu_resource = false;
        if !self.is_valid() && queue_empty {
            // We can release gpu resources associated with this device (but not
            // while holding the life_tracker lock).
            should_release_gpu_resource = true;

            // If we have a DeviceLostClosure, build an invocation with the
            // reason DeviceLostReason::Destroyed and no message.
            if let Some(device_lost_closure) = self.device_lost_closure.lock().take() {
                user_closures
                    .device_lost_invocations
                    .push(DeviceLostInvocation {
                        closure: device_lost_closure,
                        reason: DeviceLostReason::Destroyed,
                        message: String::new(),
                    });
            }
        }

        // Don't hold the locks while calling release_gpu_resources.
        drop(fence);
        drop(snatch_guard);

        if should_release_gpu_resource {
            self.release_gpu_resources();
        }

        (user_closures, result)
    }

    pub(crate) fn create_buffer(
        self: &Arc<Self>,
        desc: &resource::BufferDescriptor,
    ) -> Result<Arc<Buffer>, resource::CreateBufferError> {
        self.check_is_valid()?;

        if desc.size > self.limits.max_buffer_size {
            return Err(resource::CreateBufferError::MaxBufferSize {
                requested: desc.size,
                maximum: self.limits.max_buffer_size,
            });
        }

        if desc.usage.contains(wgt::BufferUsages::INDEX)
            && desc.usage.contains(
                wgt::BufferUsages::VERTEX
                    | wgt::BufferUsages::UNIFORM
                    | wgt::BufferUsages::INDIRECT
                    | wgt::BufferUsages::STORAGE,
            )
        {
            self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?;
        }

        if desc.usage.is_empty() || desc.usage.contains_unknown_bits() {
            return Err(resource::CreateBufferError::InvalidUsage(desc.usage));
        }

        if !self
            .features
            .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
        {
            use wgt::BufferUsages as Bu;
            let write_mismatch = desc.usage.contains(Bu::MAP_WRITE)
                && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage);
            let read_mismatch = desc.usage.contains(Bu::MAP_READ)
                && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage);
            if write_mismatch || read_mismatch {
                return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
            }
        }

        let mut usage = conv::map_buffer_usage(desc.usage);

        if desc.usage.contains(wgt::BufferUsages::INDIRECT) {
            self.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)?;
            // We are going to be reading from it internally
            // when validating the contents of the buffer.
            usage |= wgt::BufferUses::STORAGE_READ_ONLY | wgt::BufferUses::STORAGE_READ_WRITE;
        }

        if desc.usage.contains(wgt::BufferUsages::QUERY_RESOLVE) {
            usage |= TIMESTAMP_NORMALIZATION_BUFFER_USES;
        }

        if desc.mapped_at_creation {
            if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
                return Err(resource::CreateBufferError::UnalignedSize);
            }
            if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
                // we are going to be copying into it, internally
                usage |= wgt::BufferUses::COPY_DST;
            }
        } else {
            // We are required to zero out (initialize) all memory. This is done
            // on demand using clear_buffer which requires write transfer usage!
            usage |= wgt::BufferUses::COPY_DST;
        }

        let actual_size = if desc.size == 0 {
            wgt::COPY_BUFFER_ALIGNMENT
        } else if desc.usage.contains(wgt::BufferUsages::VERTEX) {
            // Bumping the size by 1 so that we can bind an empty range at the
            // end of the buffer.
            desc.size + 1
        } else {
            desc.size
        };
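        // Round the size up to the next multiple of `COPY_BUFFER_ALIGNMENT` so the whole
        // buffer can be zero-initialized and copied with aligned transfers (for example,
        // with a 4-byte alignment a 13-byte request becomes 16 bytes).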
        let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT;
        let aligned_size = if clear_remainder != 0 {
            actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder
        } else {
            actual_size
        };

        let hal_desc = hal::BufferDescriptor {
            label: desc.label.to_hal(self.instance_flags),
            size: aligned_size,
            usage,
            memory_flags: hal::MemoryFlags::empty(),
        };
        let buffer = unsafe { self.raw().create_buffer(&hal_desc) }
            .map_err(|e| self.handle_hal_error_with_nonfatal_oom(e))?;

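        // Bind group used by the timestamp-normalization pass when query results are
        // resolved into this buffer.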
        let timestamp_normalization_bind_group = Snatchable::new(
            self.timestamp_normalizer
                .get()
                .unwrap()
                .create_normalization_bind_group(
                    self,
                    &*buffer,
                    desc.label.as_deref(),
                    desc.size,
                    desc.usage,
                )?,
        );

        let indirect_validation_bind_groups =
            self.create_indirect_validation_bind_groups(buffer.as_ref(), desc.size, desc.usage)?;

        let buffer = Buffer {
            raw: Snatchable::new(buffer),
            device: self.clone(),
            usage: desc.usage,
            size: desc.size,
            initialization_status: RwLock::new(
                rank::BUFFER_INITIALIZATION_STATUS,
                BufferInitTracker::new(aligned_size),
            ),
            map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
            label: desc.label.to_string(),
            tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
            bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
            timestamp_normalization_bind_group,
            indirect_validation_bind_groups,
        };

        let buffer = Arc::new(buffer);

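        // Determine the buffer's initial internal use. If `mapped_at_creation` is set,
        // either map the buffer directly (when it has MAP_WRITE) or stage zeroed contents
        // in a staging buffer that gets uploaded once the buffer is unmapped.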
        let buffer_use = if !desc.mapped_at_creation {
            wgt::BufferUses::empty()
        } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
            // buffer is mappable, so we are just doing that at start
            let map_size = buffer.size;
            let mapping = if map_size == 0 {
                hal::BufferMapping {
                    ptr: core::ptr::NonNull::dangling(),
                    is_coherent: true,
                }
            } else {
                let snatch_guard: SnatchGuard = self.snatchable_lock.read();
                map_buffer(&buffer, 0, map_size, HostMap::Write, &snatch_guard)?
            };
            *buffer.map_state.lock() = resource::BufferMapState::Active {
                mapping,
                range: 0..map_size,
                host: HostMap::Write,
            };
            wgt::BufferUses::MAP_WRITE
        } else {
            let mut staging_buffer =
                StagingBuffer::new(self, wgt::BufferSize::new(aligned_size).unwrap())?;

            // Zero initialize memory and then mark the buffer as initialized
            // (it's guaranteed that this is the case by the time the buffer is usable)
            staging_buffer.write_zeros();
            buffer.initialization_status.write().drain(0..aligned_size);

            *buffer.map_state.lock() = resource::BufferMapState::Init { staging_buffer };
            wgt::BufferUses::COPY_DST
        };

        self.trackers
            .lock()
            .buffers
            .insert_single(&buffer, buffer_use);

        Ok(buffer)
    }

    pub(crate) fn create_texture_from_hal(
        self: &Arc<Self>,
        hal_texture: Box<dyn hal::DynTexture>,
        desc: &resource::TextureDescriptor,
    ) -> Result<Arc<Texture>, resource::CreateTextureError> {
        let format_features = self
            .describe_format_features(desc.format)
            .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?;

        unsafe { self.raw().add_raw_texture(&*hal_texture) };

        let texture = Texture::new(
            self,
            resource::TextureInner::Native { raw: hal_texture },
            conv::map_texture_usage(desc.usage, desc.format.into(), format_features.flags),
            desc,
            format_features,
            resource::TextureClearMode::None,
            false,
        );

        let texture = Arc::new(texture);

        self.trackers
            .lock()
            .textures
            .insert_single(&texture, wgt::TextureUses::UNINITIALIZED);

        Ok(texture)
    }

    pub(crate) fn create_buffer_from_hal(
        self: &Arc<Self>,
        hal_buffer: Box<dyn hal::DynBuffer>,
        desc: &resource::BufferDescriptor,
    ) -> (Fallible<Buffer>, Option<resource::CreateBufferError>) {
        let timestamp_normalization_bind_group = match self
            .timestamp_normalizer
            .get()
            .unwrap()
            .create_normalization_bind_group(
                self,
                &*hal_buffer,
                desc.label.as_deref(),
                desc.size,
                desc.usage,
            ) {
            Ok(bg) => Snatchable::new(bg),
            Err(e) => {
                return (
                    Fallible::Invalid(Arc::new(desc.label.to_string())),
                    Some(e.into()),
                )
            }
        };

        let indirect_validation_bind_groups = match self.create_indirect_validation_bind_groups(
            hal_buffer.as_ref(),
            desc.size,
            desc.usage,
        ) {
            Ok(ok) => ok,
            Err(e) => return (Fallible::Invalid(Arc::new(desc.label.to_string())), Some(e)),
        };

        unsafe { self.raw().add_raw_buffer(&*hal_buffer) };

        let buffer = Buffer {
            raw: Snatchable::new(hal_buffer),
            device: self.clone(),
            usage: desc.usage,
            size: desc.size,
            initialization_status: RwLock::new(
                rank::BUFFER_INITIALIZATION_STATUS,
                BufferInitTracker::new(0),
            ),
            map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
            label: desc.label.to_string(),
            tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
            bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
            timestamp_normalization_bind_group,
            indirect_validation_bind_groups,
        };

        let buffer = Arc::new(buffer);

        self.trackers
            .lock()
            .buffers
            .insert_single(&buffer, wgt::BufferUses::empty());

        (Fallible::Valid(buffer), None)
    }

    fn create_indirect_validation_bind_groups(
        &self,
        raw_buffer: &dyn hal::DynBuffer,
        buffer_size: u64,
        usage: wgt::BufferUsages,
    ) -> Result<Snatchable<crate::indirect_validation::BindGroups>, resource::CreateBufferError>
    {
        if !usage.contains(wgt::BufferUsages::INDIRECT) {
            return Ok(Snatchable::empty());
        }

        let Some(ref indirect_validation) = self.indirect_validation else {
            return Ok(Snatchable::empty());
        };

        let bind_groups = crate::indirect_validation::BindGroups::new(
            indirect_validation,
            self,
            buffer_size,
            raw_buffer,
        )
        .map_err(resource::CreateBufferError::IndirectValidationBindGroup)?;

        if let Some(bind_groups) = bind_groups {
            Ok(Snatchable::new(bind_groups))
        } else {
            Ok(Snatchable::empty())
        }
    }

    pub(crate) fn create_texture(
        self: &Arc<Self>,
        desc: &resource::TextureDescriptor,
    ) -> Result<Arc<Texture>, resource::CreateTextureError> {
        use resource::{CreateTextureError, TextureDimensionError};

        self.check_is_valid()?;

        if desc.usage.is_empty() || desc.usage.contains_unknown_bits() {
            return Err(CreateTextureError::InvalidUsage(desc.usage));
        }

        conv::check_texture_dimension_size(
            desc.dimension,
            desc.size,
            desc.sample_count,
            &self.limits,
        )?;

        if desc.dimension != wgt::TextureDimension::D2 {
            // Depth textures can only be 2D
            if desc.format.is_depth_stencil_format() {
                return Err(CreateTextureError::InvalidDepthDimension(
                    desc.dimension,
                    desc.format,
                ));
            }
        }

        if desc.dimension != wgt::TextureDimension::D2
            && desc.dimension != wgt::TextureDimension::D3
        {
            // Compressed textures can only be 2D or 3D
            if desc.format.is_compressed() {
                return Err(CreateTextureError::InvalidCompressedDimension(
                    desc.dimension,
                    desc.format,
                ));
            }

            // Renderable textures can only be 2D or 3D
            if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
                return Err(CreateTextureError::InvalidDimensionUsages(
                    wgt::TextureUsages::RENDER_ATTACHMENT,
                    desc.dimension,
                ));
            }
        }

        if desc.format.is_compressed() {
            let (block_width, block_height) = desc.format.block_dimensions();

            if desc.size.width % block_width != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::NotMultipleOfBlockWidth {
                        width: desc.size.width,
                        block_width,
                        format: desc.format,
                    },
                ));
            }

            if desc.size.height % block_height != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::NotMultipleOfBlockHeight {
                        height: desc.size.height,
                        block_height,
                        format: desc.format,
                    },
                ));
            }

            if desc.dimension == wgt::TextureDimension::D3 {
                // Only BCn formats with Sliced 3D feature can be used for 3D textures
                if desc.format.is_bcn() {
                    self.require_features(wgt::Features::TEXTURE_COMPRESSION_BC_SLICED_3D)
                        .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
                } else if desc.format.is_astc() {
                    self.require_features(wgt::Features::TEXTURE_COMPRESSION_ASTC_SLICED_3D)
                        .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
                } else {
                    return Err(CreateTextureError::InvalidCompressedDimension(
                        desc.dimension,
                        desc.format,
                    ));
                }
            }
        }

        {
            let (width_multiple, height_multiple) = desc.format.size_multiple_requirement();

            if desc.size.width % width_multiple != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::WidthNotMultipleOf {
                        width: desc.size.width,
                        multiple: width_multiple,
                        format: desc.format,
                    },
                ));
            }

            if desc.size.height % height_multiple != 0 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::HeightNotMultipleOf {
                        height: desc.size.height,
                        multiple: height_multiple,
                        format: desc.format,
                    },
                ));
            }
        }

        let format_features = self
            .describe_format_features(desc.format)
            .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;

        if desc.sample_count > 1 {
            if desc.mip_level_count != 1 {
                return Err(CreateTextureError::InvalidMipLevelCount {
                    requested: desc.mip_level_count,
                    maximum: 1,
                });
            }

            if desc.size.depth_or_array_layers != 1 {
                return Err(CreateTextureError::InvalidDimension(
                    TextureDimensionError::MultisampledDepthOrArrayLayer(
                        desc.size.depth_or_array_layers,
                    ),
                ));
            }

            if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) {
                return Err(CreateTextureError::InvalidMultisampledStorageBinding);
            }

            if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
                return Err(CreateTextureError::MultisampledNotRenderAttachment);
            }

            if !format_features.flags.intersects(
                wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4
                    | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2
                    | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8
                    | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16,
            ) {
                return Err(CreateTextureError::InvalidMultisampledFormat(desc.format));
            }

            if !format_features
                .flags
                .sample_count_supported(desc.sample_count)
            {
                return Err(CreateTextureError::InvalidSampleCount(
                    desc.sample_count,
                    desc.format,
                    desc.format
                        .guaranteed_format_features(self.features)
                        .flags
                        .supported_sample_counts(),
                    self.adapter
                        .get_texture_format_features(desc.format)
                        .flags
                        .supported_sample_counts(),
                ));
            };
        }

        let mips = desc.mip_level_count;
        let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS);
        if mips == 0 || mips > max_levels_allowed {
            return Err(CreateTextureError::InvalidMipLevelCount {
                requested: mips,
                maximum: max_levels_allowed,
            });
        }

        let missing_allowed_usages = desc.usage - format_features.allowed_usages;
        if !missing_allowed_usages.is_empty() {
            // detect downlevel incompatibilities
            let wgpu_allowed_usages = desc
                .format
                .guaranteed_format_features(self.features)
                .allowed_usages;
            let wgpu_missing_usages = desc.usage - wgpu_allowed_usages;
            return Err(CreateTextureError::InvalidFormatUsages(
                missing_allowed_usages,
                desc.format,
                wgpu_missing_usages.is_empty(),
            ));
        }

        let mut hal_view_formats = Vec::new();
        for format in desc.view_formats.iter() {
            if desc.format == *format {
                continue;
            }
            if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() {
                return Err(CreateTextureError::InvalidViewFormat(*format, desc.format));
            }
            hal_view_formats.push(*format);
        }
        if !hal_view_formats.is_empty() {
            self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?;
        }

        let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features);

        let hal_desc = hal::TextureDescriptor {
            label: desc.label.to_hal(self.instance_flags),
            size: desc.size,
            mip_level_count: desc.mip_level_count,
            sample_count: desc.sample_count,
            dimension: desc.dimension,
            format: desc.format,
            usage: hal_usage,
            memory_flags: hal::MemoryFlags::empty(),
            view_formats: hal_view_formats,
        };

        let raw_texture = unsafe { self.raw().create_texture(&hal_desc) }
            .map_err(|e| self.handle_hal_error_with_nonfatal_oom(e))?;

        let clear_mode = if hal_usage
            .intersects(wgt::TextureUses::DEPTH_STENCIL_WRITE | wgt::TextureUses::COLOR_TARGET)
            && desc.dimension == wgt::TextureDimension::D2
        {
            let (is_color, usage) = if desc.format.is_depth_stencil_format() {
                (false, wgt::TextureUses::DEPTH_STENCIL_WRITE)
            } else {
                (true, wgt::TextureUses::COLOR_TARGET)
            };

            let clear_label = hal_label(
                Some("(wgpu internal) clear texture view"),
                self.instance_flags,
            );

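            // Pre-build one render-target view per mip level and array layer (and per
            // plane for multi-planar formats); render-pass based texture clears use
            // these views to zero-initialize the texture.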
            let mut clear_views = SmallVec::new();
            for mip_level in 0..desc.mip_level_count {
                for array_layer in 0..desc.size.depth_or_array_layers {
                    macro_rules! push_clear_view {
                        ($format:expr, $aspect:expr) => {
                            let desc = hal::TextureViewDescriptor {
                                label: clear_label,
                                format: $format,
                                dimension: TextureViewDimension::D2,
                                usage,
                                range: wgt::ImageSubresourceRange {
                                    aspect: $aspect,
                                    base_mip_level: mip_level,
                                    mip_level_count: Some(1),
                                    base_array_layer: array_layer,
                                    array_layer_count: Some(1),
                                },
                            };
                            clear_views.push(ManuallyDrop::new(
                                unsafe {
                                    self.raw().create_texture_view(raw_texture.as_ref(), &desc)
                                }
                                .map_err(|e| self.handle_hal_error(e))?,
                            ));
                        };
                    }

                    if let Some(planes) = desc.format.planes() {
                        for plane in 0..planes {
                            let aspect = wgt::TextureAspect::from_plane(plane).unwrap();
                            let format = desc.format.aspect_specific_format(aspect).unwrap();
                            push_clear_view!(format, aspect);
                        }
                    } else {
                        push_clear_view!(desc.format, wgt::TextureAspect::All);
                    }
                }
            }
            resource::TextureClearMode::RenderPass {
                clear_views,
                is_color,
            }
        } else {
            resource::TextureClearMode::BufferCopy
        };

        let texture = Texture::new(
            self,
            resource::TextureInner::Native { raw: raw_texture },
            hal_usage,
            desc,
            format_features,
            clear_mode,
            true,
        );

        let texture = Arc::new(texture);

        self.trackers
            .lock()
            .textures
            .insert_single(&texture, wgt::TextureUses::UNINITIALIZED);

        Ok(texture)
    }
1209
1210    pub(crate) fn create_texture_view(
1211        self: &Arc<Self>,
1212        texture: &Arc<Texture>,
1213        desc: &resource::TextureViewDescriptor,
1214    ) -> Result<Arc<TextureView>, resource::CreateTextureViewError> {
1215        self.check_is_valid()?;
1216
1217        let snatch_guard = texture.device.snatchable_lock.read();
1218
1219        let texture_raw = texture.try_raw(&snatch_guard)?;
1220
1221        // resolve TextureViewDescriptor defaults
1222        // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults
1223        let resolved_format = desc.format.unwrap_or_else(|| {
1224            texture
1225                .desc
1226                .format
1227                .aspect_specific_format(desc.range.aspect)
1228                .unwrap_or(texture.desc.format)
1229        });
1230
1231        let resolved_dimension = desc
1232            .dimension
1233            .unwrap_or_else(|| match texture.desc.dimension {
1234                wgt::TextureDimension::D1 => TextureViewDimension::D1,
1235                wgt::TextureDimension::D2 => {
1236                    if texture.desc.array_layer_count() == 1 {
1237                        TextureViewDimension::D2
1238                    } else {
1239                        TextureViewDimension::D2Array
1240                    }
1241                }
1242                wgt::TextureDimension::D3 => TextureViewDimension::D3,
1243            });
1244
1245        let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| {
1246            texture
1247                .desc
1248                .mip_level_count
1249                .saturating_sub(desc.range.base_mip_level)
1250        });
1251
1252        let resolved_array_layer_count =
1253            desc.range
1254                .array_layer_count
1255                .unwrap_or_else(|| match resolved_dimension {
1256                    TextureViewDimension::D1
1257                    | TextureViewDimension::D2
1258                    | TextureViewDimension::D3 => 1,
1259                    TextureViewDimension::Cube => 6,
1260                    TextureViewDimension::D2Array | TextureViewDimension::CubeArray => texture
1261                        .desc
1262                        .array_layer_count()
1263                        .saturating_sub(desc.range.base_array_layer),
1264                });
1265
1266        let resolved_usage = {
1267            let usage = desc.usage.unwrap_or(wgt::TextureUsages::empty());
1268            if usage.is_empty() {
1269                texture.desc.usage
1270            } else if texture.desc.usage.contains(usage) {
1271                usage
1272            } else {
1273                return Err(resource::CreateTextureViewError::InvalidTextureViewUsage {
1274                    view: usage,
1275                    texture: texture.desc.usage,
1276                });
1277            }
1278        };
1279
1280        let format_features = self.describe_format_features(resolved_format)?;
1281        let allowed_format_usages = format_features.allowed_usages;
1282        if resolved_usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
1283            && !allowed_format_usages.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
1284        {
1285            return Err(
1286                resource::CreateTextureViewError::TextureViewFormatNotRenderable(resolved_format),
1287            );
1288        }
1289
1290        if resolved_usage.contains(wgt::TextureUsages::STORAGE_BINDING)
1291            && !allowed_format_usages.contains(wgt::TextureUsages::STORAGE_BINDING)
1292        {
1293            return Err(
1294                resource::CreateTextureViewError::TextureViewFormatNotStorage(resolved_format),
1295            );
1296        }
1297
1298        // validate TextureViewDescriptor
1299
1300        let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect);
1301        if aspects.is_empty() {
1302            return Err(resource::CreateTextureViewError::InvalidAspect {
1303                texture_format: texture.desc.format,
1304                requested_aspect: desc.range.aspect,
1305            });
1306        }
1307
1308        let format_is_good = if desc.range.aspect == wgt::TextureAspect::All {
1309            resolved_format == texture.desc.format
1310                || texture.desc.view_formats.contains(&resolved_format)
1311        } else {
1312            Some(resolved_format)
1313                == texture
1314                    .desc
1315                    .format
1316                    .aspect_specific_format(desc.range.aspect)
1317        };
1318        if !format_is_good {
1319            return Err(resource::CreateTextureViewError::FormatReinterpretation {
1320                texture: texture.desc.format,
1321                view: resolved_format,
1322            });
1323        }
1324
1325        // check if multisampled texture is seen as anything but 2D
1326        if texture.desc.sample_count > 1 && resolved_dimension != TextureViewDimension::D2 {
1327            return Err(
1328                resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension(
1329                    resolved_dimension,
1330                ),
1331            );
1332        }
1333
1334        // check if the dimension is compatible with the texture
1335        if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() {
1336            return Err(
1337                resource::CreateTextureViewError::InvalidTextureViewDimension {
1338                    view: resolved_dimension,
1339                    texture: texture.desc.dimension,
1340                },
1341            );
1342        }
1343
1344        match resolved_dimension {
1345            TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => {
1346                if resolved_array_layer_count != 1 {
1347                    return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
1348                        requested: resolved_array_layer_count,
1349                        dim: resolved_dimension,
1350                    });
1351                }
1352            }
1353            TextureViewDimension::Cube => {
1354                if resolved_array_layer_count != 6 {
1355                    return Err(
1356                        resource::CreateTextureViewError::InvalidCubemapTextureDepth {
1357                            depth: resolved_array_layer_count,
1358                        },
1359                    );
1360                }
1361            }
1362            TextureViewDimension::CubeArray => {
1363                if resolved_array_layer_count % 6 != 0 {
1364                    return Err(
1365                        resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
1366                            depth: resolved_array_layer_count,
1367                        },
1368                    );
1369                }
1370            }
1371            _ => {}
1372        }
1373
1374        match resolved_dimension {
1375            TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
1376                if texture.desc.size.width != texture.desc.size.height {
1377                    return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize);
1378                }
1379            }
1380            _ => {}
1381        }
1382
1383        if resolved_mip_level_count == 0 {
1384            return Err(resource::CreateTextureViewError::ZeroMipLevelCount);
1385        }
1386
1387        let mip_level_end = desc
1388            .range
1389            .base_mip_level
1390            .saturating_add(resolved_mip_level_count);
1391
1392        let level_end = texture.desc.mip_level_count;
1393        if mip_level_end > level_end {
1394            return Err(resource::CreateTextureViewError::TooManyMipLevels {
1395                requested: mip_level_end,
1396                total: level_end,
1397            });
1398        }
1399
1400        if resolved_array_layer_count == 0 {
1401            return Err(resource::CreateTextureViewError::ZeroArrayLayerCount);
1402        }
1403
1404        let array_layer_end = desc
1405            .range
1406            .base_array_layer
1407            .saturating_add(resolved_array_layer_count);
1408
1409        let layer_end = texture.desc.array_layer_count();
1410        if array_layer_end > layer_end {
1411            return Err(resource::CreateTextureViewError::TooManyArrayLayers {
1412                requested: array_layer_end,
1413                total: layer_end,
1414            });
1415        };
1416
1417        // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view
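        // `render_extent` is `Ok(extent)` only if this view can be used as a render
        // attachment; otherwise the labeled block breaks out with the first disqualifying
        // reason, which is stored on the view for later error reporting.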
1418        let render_extent = 'error: {
1419            if !resolved_usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
1420                break 'error Err(TextureViewNotRenderableReason::Usage(resolved_usage));
1421            }
1422
1423            let allowed_view_dimensions = [
1424                TextureViewDimension::D2,
1425                TextureViewDimension::D2Array,
1426                TextureViewDimension::D3,
1427            ];
1428            if !allowed_view_dimensions.contains(&resolved_dimension) {
1429                break 'error Err(TextureViewNotRenderableReason::Dimension(
1430                    resolved_dimension,
1431                ));
1432            }
1433
1434            if resolved_mip_level_count != 1 {
1435                break 'error Err(TextureViewNotRenderableReason::MipLevelCount(
1436                    resolved_mip_level_count,
1437                ));
1438            }
1439
1440            if resolved_array_layer_count != 1
1441                && !(self.features.contains(wgt::Features::MULTIVIEW))
1442            {
1443                break 'error Err(TextureViewNotRenderableReason::ArrayLayerCount(
1444                    resolved_array_layer_count,
1445                ));
1446            }
1447
1448            if aspects != hal::FormatAspects::from(texture.desc.format) {
1449                break 'error Err(TextureViewNotRenderableReason::Aspects(aspects));
1450            }
1451
1452            Ok(texture
1453                .desc
1454                .compute_render_extent(desc.range.base_mip_level))
1455        };
1456
1457        // filter the usages based on the other criteria: copy usages never apply to
        // views, cube and multi-mip-level views only support sampling, and 3D views only
        // support sampling and storage access
1458        let usage = {
1459            let resolved_hal_usage = conv::map_texture_usage(
1460                resolved_usage,
1461                resolved_format.into(),
1462                format_features.flags,
1463            );
1464            let mask_copy = !(wgt::TextureUses::COPY_SRC | wgt::TextureUses::COPY_DST);
1465            let mask_dimension = match resolved_dimension {
1466                TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
1467                    wgt::TextureUses::RESOURCE
1468                }
1469                TextureViewDimension::D3 => {
1470                    wgt::TextureUses::RESOURCE
1471                        | wgt::TextureUses::STORAGE_READ_ONLY
1472                        | wgt::TextureUses::STORAGE_WRITE_ONLY
1473                        | wgt::TextureUses::STORAGE_READ_WRITE
1474                }
1475                _ => wgt::TextureUses::all(),
1476            };
1477            let mask_mip_level = if resolved_mip_level_count == 1 {
1478                wgt::TextureUses::all()
1479            } else {
1480                wgt::TextureUses::RESOURCE
1481            };
1482            resolved_hal_usage & mask_copy & mask_dimension & mask_mip_level
1483        };
1484
1485        // if the view format selects a single depth or stencil component of the texture's
        // combined depth-stencil format, the hal view still uses the combined format
1486        let format = if resolved_format.is_depth_stencil_component(texture.desc.format) {
1487            texture.desc.format
1488        } else {
1489            resolved_format
1490        };
1491
1492        let resolved_range = wgt::ImageSubresourceRange {
1493            aspect: desc.range.aspect,
1494            base_mip_level: desc.range.base_mip_level,
1495            mip_level_count: Some(resolved_mip_level_count),
1496            base_array_layer: desc.range.base_array_layer,
1497            array_layer_count: Some(resolved_array_layer_count),
1498        };
1499
1500        let hal_desc = hal::TextureViewDescriptor {
1501            label: desc.label.to_hal(self.instance_flags),
1502            format,
1503            dimension: resolved_dimension,
1504            usage,
1505            range: resolved_range,
1506        };
1507
1508        let raw = unsafe { self.raw().create_texture_view(texture_raw, &hal_desc) }
1509            .map_err(|e| self.handle_hal_error(e))?;
1510
1511        let selector = TextureSelector {
1512            mips: desc.range.base_mip_level..mip_level_end,
1513            layers: desc.range.base_array_layer..array_layer_end,
1514        };
1515
1516        let view = TextureView {
1517            raw: Snatchable::new(raw),
1518            parent: texture.clone(),
1519            device: self.clone(),
1520            desc: resource::HalTextureViewDescriptor {
1521                texture_format: texture.desc.format,
1522                format: resolved_format,
1523                dimension: resolved_dimension,
1524                usage: resolved_usage,
1525                range: resolved_range,
1526            },
1527            format_features: texture.format_features,
1528            render_extent,
1529            samples: texture.desc.sample_count,
1530            selector,
1531            label: desc.label.to_string(),
1532            tracking_data: TrackingData::new(self.tracker_indices.texture_views.clone()),
1533        };
1534
1535        let view = Arc::new(view);
1536
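        // Keep a weak reference to the new view on its parent texture so the texture can
        // find the views created from it (for example, when it is destroyed).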
1537        {
1538            let mut views = texture.views.lock();
1539            views.push(Arc::downgrade(&view));
1540        }
1541
1542        Ok(view)
1543    }
1544
1545    pub(crate) fn create_sampler(
1546        self: &Arc<Self>,
1547        desc: &resource::SamplerDescriptor,
1548    ) -> Result<Arc<Sampler>, resource::CreateSamplerError> {
1549        self.check_is_valid()?;
1550
1551        if desc
1552            .address_modes
1553            .iter()
1554            .any(|am| am == &wgt::AddressMode::ClampToBorder)
1555        {
1556            self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?;
1557        }
1558
1559        if desc.border_color == Some(wgt::SamplerBorderColor::Zero) {
1560            self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?;
1561        }
1562
1563        if desc.lod_min_clamp < 0.0 {
1564            return Err(resource::CreateSamplerError::InvalidLodMinClamp(
1565                desc.lod_min_clamp,
1566            ));
1567        }
1568        if desc.lod_max_clamp < desc.lod_min_clamp {
1569            return Err(resource::CreateSamplerError::InvalidLodMaxClamp {
1570                lod_min_clamp: desc.lod_min_clamp,
1571                lod_max_clamp: desc.lod_max_clamp,
1572            });
1573        }
1574
1575        if desc.anisotropy_clamp < 1 {
1576            return Err(resource::CreateSamplerError::InvalidAnisotropy(
1577                desc.anisotropy_clamp,
1578            ));
1579        }
1580
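        // Anisotropic filtering requires all three filter modes to be linear.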
1581        if desc.anisotropy_clamp != 1 {
1582            if !matches!(desc.min_filter, wgt::FilterMode::Linear) {
1583                return Err(
1584                    resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
1585                        filter_type: resource::SamplerFilterErrorType::MinFilter,
1586                        filter_mode: desc.min_filter,
1587                        anisotropic_clamp: desc.anisotropy_clamp,
1588                    },
1589                );
1590            }
1591            if !matches!(desc.mag_filter, wgt::FilterMode::Linear) {
1592                return Err(
1593                    resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
1594                        filter_type: resource::SamplerFilterErrorType::MagFilter,
1595                        filter_mode: desc.mag_filter,
1596                        anisotropic_clamp: desc.anisotropy_clamp,
1597                    },
1598                );
1599            }
1600            if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) {
1601                return Err(
1602                    resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
1603                        filter_type: resource::SamplerFilterErrorType::MipmapFilter,
1604                        filter_mode: desc.mipmap_filter,
1605                        anisotropic_clamp: desc.anisotropy_clamp,
1606                    },
1607                );
1608            }
1609        }
1610
1611        let anisotropy_clamp = if self
1612            .downlevel
1613            .flags
1614            .contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING)
1615        {
1616            // Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface
1617            desc.anisotropy_clamp.min(16)
1618        } else {
1619            // If it isn't supported, set this unconditionally to 1
1620            1
1621        };
1622
1623        //TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS
1624
1625        let hal_desc = hal::SamplerDescriptor {
1626            label: desc.label.to_hal(self.instance_flags),
1627            address_modes: desc.address_modes,
1628            mag_filter: desc.mag_filter,
1629            min_filter: desc.min_filter,
1630            mipmap_filter: desc.mipmap_filter,
1631            lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp,
1632            compare: desc.compare,
1633            anisotropy_clamp,
1634            border_color: desc.border_color,
1635        };
1636
1637        let raw = unsafe { self.raw().create_sampler(&hal_desc) }
1638            .map_err(|e| self.handle_hal_error_with_nonfatal_oom(e))?;
1639
1640        let sampler = Sampler {
1641            raw: ManuallyDrop::new(raw),
1642            device: self.clone(),
1643            label: desc.label.to_string(),
1644            tracking_data: TrackingData::new(self.tracker_indices.samplers.clone()),
1645            comparison: desc.compare.is_some(),
1646            filtering: desc.min_filter == wgt::FilterMode::Linear
1647                || desc.mag_filter == wgt::FilterMode::Linear
1648                || desc.mipmap_filter == wgt::FilterMode::Linear,
1649        };
1650
1651        let sampler = Arc::new(sampler);
1652
1653        Ok(sampler)
1654    }
1655
1656    pub(crate) fn create_shader_module<'a>(
1657        self: &Arc<Self>,
1658        desc: &pipeline::ShaderModuleDescriptor<'a>,
1659        source: pipeline::ShaderModuleSource<'a>,
1660    ) -> Result<Arc<pipeline::ShaderModule>, pipeline::CreateShaderModuleError> {
1661        self.check_is_valid()?;
1662
1663        let (module, source) = match source {
1664            #[cfg(feature = "wgsl")]
1665            pipeline::ShaderModuleSource::Wgsl(code) => {
1666                profiling::scope!("naga::front::wgsl::parse_str");
1667                let module = naga::front::wgsl::parse_str(&code).map_err(|inner| {
1668                    pipeline::CreateShaderModuleError::Parsing(naga::error::ShaderError {
1669                        source: code.to_string(),
1670                        label: desc.label.as_ref().map(|l| l.to_string()),
1671                        inner: Box::new(inner),
1672                    })
1673                })?;
1674                (Cow::Owned(module), code.into_owned())
1675            }
1676            #[cfg(feature = "spirv")]
1677            pipeline::ShaderModuleSource::SpirV(spv, options) => {
1678                let parser = naga::front::spv::Frontend::new(spv.iter().cloned(), &options);
1679                profiling::scope!("naga::front::spv::Frontend");
1680                let module = parser.parse().map_err(|inner| {
1681                    pipeline::CreateShaderModuleError::ParsingSpirV(naga::error::ShaderError {
1682                        source: String::new(),
1683                        label: desc.label.as_ref().map(|l| l.to_string()),
1684                        inner: Box::new(inner),
1685                    })
1686                })?;
1687                (Cow::Owned(module), String::new())
1688            }
1689            #[cfg(feature = "glsl")]
1690            pipeline::ShaderModuleSource::Glsl(code, options) => {
1691                let mut parser = naga::front::glsl::Frontend::default();
1692                profiling::scope!("naga::front::glsl::Frontend.parse");
1693                let module = parser.parse(&options, &code).map_err(|inner| {
1694                    pipeline::CreateShaderModuleError::ParsingGlsl(naga::error::ShaderError {
1695                        source: code.to_string(),
1696                        label: desc.label.as_ref().map(|l| l.to_string()),
1697                        inner: Box::new(inner),
1698                    })
1699                })?;
1700                (Cow::Owned(module), code.into_owned())
1701            }
1702            pipeline::ShaderModuleSource::Naga(module) => (module, String::new()),
1703            pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"),
1704        };
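        // Reject modules that reference a bind group index at or above `max_bind_groups`;
        // such a resource could never be bound.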
1705        for (_, var) in module.global_variables.iter() {
1706            match var.binding {
1707                Some(br) if br.group >= self.limits.max_bind_groups => {
1708                    return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex {
1709                        bind: br,
1710                        group: br.group,
1711                        limit: self.limits.max_bind_groups,
1712                    });
1713                }
1714                _ => continue,
1715            };
1716        }
1717
1718        profiling::scope!("naga::validate");
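        // When the instance was created with debug flags and source text is available,
        // capture it so the backend can attach it to the compiled module as debug info.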
1719        let debug_source =
1720            if self.instance_flags.contains(wgt::InstanceFlags::DEBUG) && !source.is_empty() {
1721                Some(hal::DebugSource {
1722                    file_name: Cow::Owned(
1723                        desc.label
1724                            .as_ref()
1725                            .map_or("shader".to_string(), |l| l.to_string()),
1726                    ),
1727                    source_code: Cow::Owned(source.clone()),
1728                })
1729            } else {
1730                None
1731            };
1732
1733        let info = create_validator(
1734            self.features,
1735            self.downlevel.flags,
1736            naga::valid::ValidationFlags::all(),
1737        )
1738        .validate(&module)
1739        .map_err(|inner| {
1740            pipeline::CreateShaderModuleError::Validation(naga::error::ShaderError {
1741                source,
1742                label: desc.label.as_ref().map(|l| l.to_string()),
1743                inner: Box::new(inner),
1744            })
1745        })?;
1746
1747        let interface = validation::Interface::new(&module, &info, self.limits.clone());
1748        let hal_shader = hal::ShaderInput::Naga(hal::NagaShader {
1749            module,
1750            info,
1751            debug_source,
1752        });
1753        let hal_desc = hal::ShaderModuleDescriptor {
1754            label: desc.label.to_hal(self.instance_flags),
1755            runtime_checks: desc.runtime_checks,
1756        };
1757        let raw = match unsafe { self.raw().create_shader_module(&hal_desc, hal_shader) } {
1758            Ok(raw) => raw,
1759            Err(error) => {
1760                return Err(match error {
1761                    hal::ShaderError::Device(error) => {
1762                        pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error))
1763                    }
1764                    hal::ShaderError::Compilation(ref msg) => {
1765                        log::error!("Shader error: {}", msg);
1766                        pipeline::CreateShaderModuleError::Generation
1767                    }
1768                })
1769            }
1770        };
1771
1772        let module = pipeline::ShaderModule {
1773            raw: ManuallyDrop::new(raw),
1774            device: self.clone(),
1775            interface: Some(interface),
1776            label: desc.label.to_string(),
1777        };
1778
1779        let module = Arc::new(module);
1780
1781        Ok(module)
1782    }
1783
1784    #[allow(unused_unsafe)]
1785    pub(crate) unsafe fn create_shader_module_passthrough<'a>(
1786        self: &Arc<Self>,
1787        descriptor: &pipeline::ShaderModuleDescriptorPassthrough<'a>,
1788    ) -> Result<Arc<pipeline::ShaderModule>, pipeline::CreateShaderModuleError> {
1789        self.check_is_valid()?;
1790        let hal_shader = match descriptor {
1791            pipeline::ShaderModuleDescriptorPassthrough::SpirV(inner) => {
1792                self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?;
1793                hal::ShaderInput::SpirV(&inner.source)
1794            }
1795            pipeline::ShaderModuleDescriptorPassthrough::Msl(inner) => {
1796                self.require_features(wgt::Features::MSL_SHADER_PASSTHROUGH)?;
1797                hal::ShaderInput::Msl {
1798                    shader: inner.source.to_string(),
1799                    entry_point: inner.entry_point.to_string(),
1800                    num_workgroups: inner.num_workgroups,
1801                }
1802            }
1803            pipeline::ShaderModuleDescriptorPassthrough::Dxil(inner) => {
1804                self.require_features(wgt::Features::HLSL_DXIL_SHADER_PASSTHROUGH)?;
1805                hal::ShaderInput::Dxil {
1806                    shader: inner.source,
1807                    entry_point: inner.entry_point.clone(),
1808                    num_workgroups: inner.num_workgroups,
1809                }
1810            }
1811            pipeline::ShaderModuleDescriptorPassthrough::Hlsl(inner) => {
1812                self.require_features(wgt::Features::HLSL_DXIL_SHADER_PASSTHROUGH)?;
1813                hal::ShaderInput::Hlsl {
1814                    shader: inner.source,
1815                    entry_point: inner.entry_point.clone(),
1816                    num_workgroups: inner.num_workgroups,
1817                }
1818            }
1819        };
1820
1821        let hal_desc = hal::ShaderModuleDescriptor {
1822            label: descriptor.label().to_hal(self.instance_flags),
1823            runtime_checks: wgt::ShaderRuntimeChecks::unchecked(),
1824        };
1825
1826        let raw = match unsafe { self.raw().create_shader_module(&hal_desc, hal_shader) } {
1827            Ok(raw) => raw,
1828            Err(error) => {
1829                return Err(match error {
1830                    hal::ShaderError::Device(error) => {
1831                        pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error))
1832                    }
1833                    hal::ShaderError::Compilation(ref msg) => {
1834                        log::error!("Shader error: {}", msg);
1835                        pipeline::CreateShaderModuleError::Generation
1836                    }
1837                })
1838            }
1839        };
1840
1841        let module = pipeline::ShaderModule {
1842            raw: ManuallyDrop::new(raw),
1843            device: self.clone(),
1844            interface: None,
1845            label: descriptor.label().to_string(),
1846        };
1847
1848        Ok(Arc::new(module))
1849    }
1850
1851    pub(crate) fn create_command_encoder(
1852        self: &Arc<Self>,
1853        label: &crate::Label,
1854    ) -> Result<Arc<command::CommandBuffer>, DeviceError> {
1855        self.check_is_valid()?;
1856
1857        let queue = self.get_queue().unwrap();
1858
1859        let encoder = self
1860            .command_allocator
1861            .acquire_encoder(self.raw(), queue.raw())
1862            .map_err(|e| self.handle_hal_error(e))?;
1863
1864        let command_buffer = command::CommandBuffer::new(encoder, self, label);
1865
1866        let command_buffer = Arc::new(command_buffer);
1867
1868        Ok(command_buffer)
1869    }
1870
1871    /// Generate information about late-validated buffer bindings for pipelines.
1872    //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way?
1873    fn make_late_sized_buffer_groups(
1874        shader_binding_sizes: &FastHashMap<naga::ResourceBinding, wgt::BufferSize>,
1875        layout: &binding_model::PipelineLayout,
1876    ) -> ArrayVec<pipeline::LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }> {
1877        // Given the shader-required binding sizes and the pipeline layout, return the
1878        // sizes in layout order, skipping bindings that declare an explicit
1879        // `min_binding_size` (those are validated when the bind group is created).
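        // For example, a layout entry `Buffer { min_binding_size: None, .. }` contributes
        // whatever size the shader requires for that binding (or 0 if the shader does not
        // use it), while entries with an explicit `min_binding_size` are skipped.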
1880        layout
1881            .bind_group_layouts
1882            .iter()
1883            .enumerate()
1884            .map(|(group_index, bgl)| pipeline::LateSizedBufferGroup {
1885                shader_sizes: bgl
1886                    .entries
1887                    .values()
1888                    .filter_map(|entry| match entry.ty {
1889                        wgt::BindingType::Buffer {
1890                            min_binding_size: None,
1891                            ..
1892                        } => {
1893                            let rb = naga::ResourceBinding {
1894                                group: group_index as u32,
1895                                binding: entry.binding,
1896                            };
1897                            let shader_size =
1898                                shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get());
1899                            Some(shader_size)
1900                        }
1901                        _ => None,
1902                    })
1903                    .collect(),
1904            })
1905            .collect()
1906    }
1907
1908    pub(crate) fn create_bind_group_layout(
1909        self: &Arc<Self>,
1910        label: &crate::Label,
1911        entry_map: bgl::EntryMap,
1912        origin: bgl::Origin,
1913    ) -> Result<Arc<BindGroupLayout>, binding_model::CreateBindGroupLayoutError> {
1914        #[derive(PartialEq)]
1915        enum WritableStorage {
1916            Yes,
1917            No,
1918        }
1919
1920        for entry in entry_map.values() {
1921            use wgt::BindingType as Bt;
1922
1923            let mut required_features = wgt::Features::empty();
1924            let mut required_downlevel_flags = wgt::DownlevelFlags::empty();
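            // For each binding type, work out which feature (if any) allows `count` to be
            // set on this entry, and whether the binding is writable from shaders.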
1925            let (array_feature, writable_storage) = match entry.ty {
1926                Bt::Buffer {
1927                    ty: wgt::BufferBindingType::Uniform,
1928                    has_dynamic_offset: false,
1929                    min_binding_size: _,
1930                } => (
1931                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
1932                    WritableStorage::No,
1933                ),
1934                Bt::Buffer {
1935                    ty: wgt::BufferBindingType::Uniform,
1936                    has_dynamic_offset: true,
1937                    min_binding_size: _,
1938                } => (
1939                    Some(wgt::Features::BUFFER_BINDING_ARRAY),
1940                    WritableStorage::No,
1941                ),
1942                Bt::Buffer {
1943                    ty: wgt::BufferBindingType::Storage { read_only },
1944                    ..
1945                } => (
1946                    Some(
1947                        wgt::Features::BUFFER_BINDING_ARRAY
1948                            | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
1949                    ),
1950                    match read_only {
1951                        true => WritableStorage::No,
1952                        false => WritableStorage::Yes,
1953                    },
1954                ),
1955                Bt::Sampler { .. } => (
1956                    Some(wgt::Features::TEXTURE_BINDING_ARRAY),
1957                    WritableStorage::No,
1958                ),
1959                Bt::Texture {
1960                    multisampled: true,
1961                    sample_type: TextureSampleType::Float { filterable: true },
1962                    ..
1963                } => {
1964                    return Err(binding_model::CreateBindGroupLayoutError::Entry {
1965                        binding: entry.binding,
1966                        error:
1967                            BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled,
1968                    });
1969                }
1970                Bt::Texture {
1971                    multisampled,
1972                    view_dimension,
1973                    ..
1974                } => {
1975                    if multisampled && view_dimension != TextureViewDimension::D2 {
1976                        return Err(binding_model::CreateBindGroupLayoutError::Entry {
1977                            binding: entry.binding,
1978                            error: BindGroupLayoutEntryError::Non2DMultisampled(view_dimension),
1979                        });
1980                    }
1981
1982                    (
1983                        Some(wgt::Features::TEXTURE_BINDING_ARRAY),
1984                        WritableStorage::No,
1985                    )
1986                }
1987                Bt::StorageTexture {
1988                    access,
1989                    view_dimension,
1990                    format: _,
1991                } => {
1992                    match view_dimension {
1993                        TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
1994                            return Err(binding_model::CreateBindGroupLayoutError::Entry {
1995                                binding: entry.binding,
1996                                error: BindGroupLayoutEntryError::StorageTextureCube,
1997                            })
1998                        }
1999                        _ => (),
2000                    }
2001                    match access {
2002                        wgt::StorageTextureAccess::Atomic
2003                            if !self.features.contains(wgt::Features::TEXTURE_ATOMIC) =>
2004                        {
2005                            return Err(binding_model::CreateBindGroupLayoutError::Entry {
2006                                binding: entry.binding,
2007                                error: BindGroupLayoutEntryError::StorageTextureAtomic,
2008                            });
2009                        }
2010                        _ => (),
2011                    }
2012                    (
2013                        Some(
2014                            wgt::Features::TEXTURE_BINDING_ARRAY
2015                                | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
2016                        ),
2017                        match access {
2018                            wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes,
2019                            wgt::StorageTextureAccess::ReadOnly => WritableStorage::No,
2020                            wgt::StorageTextureAccess::ReadWrite => WritableStorage::Yes,
2021                            wgt::StorageTextureAccess::Atomic => {
2022                                required_features |= wgt::Features::TEXTURE_ATOMIC;
2023                                WritableStorage::Yes
2024                            }
2025                        },
2026                    )
2027                }
2028                Bt::AccelerationStructure { .. } => (None, WritableStorage::No),
2029                Bt::ExternalTexture => {
2030                    self.require_features(wgt::Features::EXTERNAL_TEXTURE)
2031                        .map_err(|e| binding_model::CreateBindGroupLayoutError::Entry {
2032                            binding: entry.binding,
2033                            error: e.into(),
2034                        })?;
2035                    (None, WritableStorage::No)
2036                }
2037            };
2038
2039            // Validate the count parameter
2040            if entry.count.is_some() {
2041                required_features |= array_feature
2042                    .ok_or(BindGroupLayoutEntryError::ArrayUnsupported)
2043                    .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
2044                        binding: entry.binding,
2045                        error,
2046                    })?;
2047            }
2048
2049            if entry.visibility.contains_unknown_bits() {
2050                return Err(
2051                    binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility),
2052                );
2053            }
2054
2055            if entry.visibility.contains(wgt::ShaderStages::VERTEX) {
2056                if writable_storage == WritableStorage::Yes {
2057                    required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE;
2058                }
2059                if let Bt::Buffer {
2060                    ty: wgt::BufferBindingType::Storage { .. },
2061                    ..
2062                } = entry.ty
2063                {
2064                    required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE;
2065                }
2066            }
2067            if writable_storage == WritableStorage::Yes
2068                && entry.visibility.contains(wgt::ShaderStages::FRAGMENT)
2069            {
2070                required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE;
2071            }
2072
2073            self.require_features(required_features)
2074                .map_err(BindGroupLayoutEntryError::MissingFeatures)
2075                .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
2076                    binding: entry.binding,
2077                    error,
2078                })?;
2079            self.require_downlevel_flags(required_downlevel_flags)
2080                .map_err(BindGroupLayoutEntryError::MissingDownlevelFlags)
2081                .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
2082                    binding: entry.binding,
2083                    error,
2084                })?;
2085        }
2086
2087        let bgl_flags = conv::bind_group_layout_flags(self.features);
2088
2089        let hal_bindings = entry_map.values().copied().collect::<Vec<_>>();
2090        let hal_desc = hal::BindGroupLayoutDescriptor {
2091            label: label.to_hal(self.instance_flags),
2092            flags: bgl_flags,
2093            entries: &hal_bindings,
2094        };
2095
2096        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
2097        for entry in entry_map.values() {
2098            count_validator.add_binding(entry);
2099        }
2100        // If a single bind group layout violates limits, the pipeline layout is
2101        // definitely going to violate limits too, so let's catch it now.
2102        count_validator
2103            .validate(&self.limits)
2104            .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
2105
2106        // Validate that binding arrays don't conflict with dynamic offsets.
2107        count_validator.validate_binding_arrays()?;
2108
2109        let raw = unsafe { self.raw().create_bind_group_layout(&hal_desc) }
2110            .map_err(|e| self.handle_hal_error(e))?;
2111
2112        let bgl = BindGroupLayout {
2113            raw: ManuallyDrop::new(raw),
2114            device: self.clone(),
2115            entries: entry_map,
2116            origin,
2117            exclusive_pipeline: OnceCellOrLock::new(),
2118            binding_count_validator: count_validator,
2119            label: label.to_string(),
2120        };
2121
2122        let bgl = Arc::new(bgl);
2123
2124        Ok(bgl)
2125    }
2126
2127    fn create_buffer_binding<'a>(
2128        &self,
2129        bb: &'a binding_model::ResolvedBufferBinding,
2130        binding: u32,
2131        decl: &wgt::BindGroupLayoutEntry,
2132        used_buffer_ranges: &mut Vec<BufferInitTrackerAction>,
2133        dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
2134        late_buffer_binding_sizes: &mut FastHashMap<u32, wgt::BufferSize>,
2135        used: &mut BindGroupStates,
2136        snatch_guard: &'a SnatchGuard<'a>,
2137    ) -> Result<hal::BufferBinding<'a, dyn hal::DynBuffer>, binding_model::CreateBindGroupError>
2138    {
2139        use crate::binding_model::CreateBindGroupError as Error;
2140
2141        let (binding_ty, dynamic, min_size) = match decl.ty {
2142            wgt::BindingType::Buffer {
2143                ty,
2144                has_dynamic_offset,
2145                min_binding_size,
2146            } => (ty, has_dynamic_offset, min_binding_size),
2147            _ => {
2148                return Err(Error::WrongBindingType {
2149                    binding,
2150                    actual: decl.ty,
2151                    expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
2152                })
2153            }
2154        };
2155
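        // Map the binding type to the buffer usage the buffer must have, the internal hal
        // use, and the device limit on the size of the bound range.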
2156        let (pub_usage, internal_use, range_limit) = match binding_ty {
2157            wgt::BufferBindingType::Uniform => (
2158                wgt::BufferUsages::UNIFORM,
2159                wgt::BufferUses::UNIFORM,
2160                self.limits.max_uniform_buffer_binding_size,
2161            ),
2162            wgt::BufferBindingType::Storage { read_only } => (
2163                wgt::BufferUsages::STORAGE,
2164                if read_only {
2165                    wgt::BufferUses::STORAGE_READ_ONLY
2166                } else {
2167                    wgt::BufferUses::STORAGE_READ_WRITE
2168                },
2169                self.limits.max_storage_buffer_binding_size,
2170            ),
2171        };
2172
2173        let (align, align_limit_name) =
2174            binding_model::buffer_binding_type_alignment(&self.limits, binding_ty);
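        // For example (illustrative), a uniform binding on a device whose
        // `min_uniform_buffer_offset_alignment` is 256 must use offsets that are multiples
        // of 256.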
2175        if bb.offset % align as u64 != 0 {
2176            return Err(Error::UnalignedBufferOffset(
2177                bb.offset,
2178                align_limit_name,
2179                align,
2180            ));
2181        }
2182
2183        let buffer = &bb.buffer;
2184
2185        used.buffers.insert_single(buffer.clone(), internal_use);
2186
2187        buffer.same_device(self)?;
2188
2189        buffer.check_usage(pub_usage)?;
2190        let raw_buffer = buffer.try_raw(snatch_guard)?;
2191
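        // Resolve the bound range: an explicit size must fit inside the buffer, while
        // `None` binds from `offset` to the end of the buffer.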
2192        let (bind_size, bind_end) = match bb.size {
2193            Some(size) => {
2194                let end = bb.offset + size.get();
2195                if end > buffer.size {
2196                    return Err(Error::BindingRangeTooLarge {
2197                        buffer: buffer.error_ident(),
2198                        range: bb.offset..end,
2199                        size: buffer.size,
2200                    });
2201                }
2202                (size.get(), end)
2203            }
2204            None => {
2205                if buffer.size < bb.offset {
2206                    return Err(Error::BindingRangeTooLarge {
2207                        buffer: buffer.error_ident(),
2208                        range: bb.offset..bb.offset,
2209                        size: buffer.size,
2210                    });
2211                }
2212                (buffer.size - bb.offset, buffer.size)
2213            }
2214        };
2215
2216        if bind_size > range_limit as u64 {
2217            return Err(Error::BufferRangeTooLarge {
2218                binding,
2219                given: bind_size as u32,
2220                limit: range_limit,
2221            });
2222        }
2223
2224        // Record binding info for validating dynamic offsets
2225        if dynamic {
2226            dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
2227                binding_idx: binding,
2228                buffer_size: buffer.size,
2229                binding_range: bb.offset..bind_end,
2230                maximum_dynamic_offset: buffer.size - bind_end,
2231                binding_type: binding_ty,
2232            });
2233        }
2234
2235        if let Some(non_zero) = min_size {
2236            let min_size = non_zero.get();
2237            if min_size > bind_size {
2238                return Err(Error::BindingSizeTooSmall {
2239                    buffer: buffer.error_ident(),
2240                    actual: bind_size,
2241                    min: min_size,
2242                });
2243            }
2244        } else {
2245            let late_size = wgt::BufferSize::new(bind_size)
2246                .ok_or_else(|| Error::BindingZeroSize(buffer.error_ident()))?;
2247            late_buffer_binding_sizes.insert(binding, late_size);
2248        }
2249
2250        // This was checked against the device's alignment requirements above,
2251        // which should always be a multiple of `COPY_BUFFER_ALIGNMENT`.
2252        assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
2253
2254        // `wgpu_hal` only restricts shader access to bound buffer regions with
2255        // a certain resolution. For the sake of lazy initialization, round up
2256        // the size of the bound range to reflect how much of the buffer is
2257        // actually going to be visible to the shader.
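        // For example (illustrative), binding 196 bytes with a 256-byte bounds-check
        // alignment yields a `visible_size` of 256, so the 256 bytes starting at `offset`
        // are recorded as needing initialization.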
2258        let bounds_check_alignment =
2259            binding_model::buffer_binding_type_bounds_check_alignment(&self.alignments, binding_ty);
2260        let visible_size = align_to(bind_size, bounds_check_alignment);
2261
2262        used_buffer_ranges.extend(buffer.initialization_status.read().create_action(
2263            buffer,
2264            bb.offset..bb.offset + visible_size,
2265            MemoryInitKind::NeedsInitializedMemory,
2266        ));
2267
2268        Ok(hal::BufferBinding {
2269            buffer: raw_buffer,
2270            offset: bb.offset,
2271            size: bb.size,
2272        })
2273    }
2274
2275    fn create_sampler_binding<'a>(
2276        &self,
2277        used: &mut BindGroupStates,
2278        binding: u32,
2279        decl: &wgt::BindGroupLayoutEntry,
2280        sampler: &'a Arc<Sampler>,
2281    ) -> Result<&'a dyn hal::DynSampler, binding_model::CreateBindGroupError> {
2282        use crate::binding_model::CreateBindGroupError as Error;
2283
2284        used.samplers.insert_single(sampler.clone());
2285
2286        sampler.same_device(self)?;
2287
2288        match decl.ty {
2289            wgt::BindingType::Sampler(ty) => {
2290                let (allowed_filtering, allowed_comparison) = match ty {
2291                    wgt::SamplerBindingType::Filtering => (None, false),
2292                    wgt::SamplerBindingType::NonFiltering => (Some(false), false),
2293                    wgt::SamplerBindingType::Comparison => (None, true),
2294                };
2295                if let Some(allowed_filtering) = allowed_filtering {
2296                    if allowed_filtering != sampler.filtering {
2297                        return Err(Error::WrongSamplerFiltering {
2298                            binding,
2299                            layout_flt: allowed_filtering,
2300                            sampler_flt: sampler.filtering,
2301                        });
2302                    }
2303                }
2304                if allowed_comparison != sampler.comparison {
2305                    return Err(Error::WrongSamplerComparison {
2306                        binding,
2307                        layout_cmp: allowed_comparison,
2308                        sampler_cmp: sampler.comparison,
2309                    });
2310                }
2311            }
2312            _ => {
2313                return Err(Error::WrongBindingType {
2314                    binding,
2315                    actual: decl.ty,
2316                    expected: "Sampler",
2317                })
2318            }
2319        }
2320
2321        Ok(sampler.raw())
2322    }
2323
2324    fn create_texture_binding<'a>(
2325        &self,
2326        binding: u32,
2327        decl: &wgt::BindGroupLayoutEntry,
2328        view: &'a Arc<TextureView>,
2329        used: &mut BindGroupStates,
2330        used_texture_ranges: &mut Vec<TextureInitTrackerAction>,
2331        snatch_guard: &'a SnatchGuard<'a>,
2332    ) -> Result<hal::TextureBinding<'a, dyn hal::DynTextureView>, binding_model::CreateBindGroupError>
2333    {
2334        view.same_device(self)?;
2335
2336        let internal_use = self.texture_use_parameters(
2337            binding,
2338            decl,
2339            view,
2340            "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
2341        )?;
2342
2343        used.views.insert_single(view.clone(), internal_use);
2344
2345        let texture = &view.parent;
2346
2347        used_texture_ranges.push(TextureInitTrackerAction {
2348            texture: texture.clone(),
2349            range: TextureInitRange {
2350                mip_range: view.desc.range.mip_range(texture.desc.mip_level_count),
2351                layer_range: view
2352                    .desc
2353                    .range
2354                    .layer_range(texture.desc.array_layer_count()),
2355            },
2356            kind: MemoryInitKind::NeedsInitializedMemory,
2357        });
2358
2359        Ok(hal::TextureBinding {
2360            view: view.try_raw(snatch_guard)?,
2361            usage: internal_use,
2362        })
2363    }
2364
2365    fn create_tlas_binding<'a>(
2366        self: &Arc<Self>,
2367        used: &mut BindGroupStates,
2368        binding: u32,
2369        decl: &wgt::BindGroupLayoutEntry,
2370        tlas: &'a Arc<Tlas>,
2371        snatch_guard: &'a SnatchGuard<'a>,
2372    ) -> Result<&'a dyn hal::DynAccelerationStructure, binding_model::CreateBindGroupError> {
2373        use crate::binding_model::CreateBindGroupError as Error;
2374
2375        used.acceleration_structures.insert_single(tlas.clone());
2376
2377        tlas.same_device(self)?;
2378
2379        match decl.ty {
2380            wgt::BindingType::AccelerationStructure { vertex_return } => {
2381                if vertex_return
2382                    && !tlas.flags.contains(
2383                        wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN,
2384                    )
2385                {
2386                    return Err(Error::MissingTLASVertexReturn { binding });
2387                }
2388            }
2389            _ => {
2390                return Err(Error::WrongBindingType {
2391                    binding,
2392                    actual: decl.ty,
2393                    expected: "Tlas",
2394                });
2395            }
2396        }
2397
2398        Ok(tlas.try_raw(snatch_guard)?)
2399    }
2400
2401    // This function expects the caller to have already resolved the provided bind group
2402    // layout (i.e. it must not be an unresolved duplicate).
2403    pub(crate) fn create_bind_group(
2404        self: &Arc<Self>,
2405        desc: binding_model::ResolvedBindGroupDescriptor,
2406    ) -> Result<Arc<BindGroup>, binding_model::CreateBindGroupError> {
2407        use crate::binding_model::{CreateBindGroupError as Error, ResolvedBindingResource as Br};
2408
2409        let layout = desc.layout;
2410
2411        self.check_is_valid()?;
2412        layout.same_device(self)?;
2413
2414        {
2415            // Check that the number of entries in the descriptor matches
2416            // the number of entries in the layout.
2417            let actual = desc.entries.len();
2418            let expected = layout.entries.len();
2419            if actual != expected {
2420                return Err(Error::BindingsNumMismatch { expected, actual });
2421            }
2422        }
2423
2424        // TODO: arrayvec/smallvec, or re-use allocations
2425        // Record binding info for dynamic offset validation
2426        let mut dynamic_binding_info = Vec::new();
2427        // Map of binding -> shader reflected size
2428        //Note: we can't collect into a vector right away because
2429        // it needs to be in BGL iteration order, not BG entry order.
2430        let mut late_buffer_binding_sizes = FastHashMap::default();
2431        // state used to fill out the hal bind group descriptor below
2432        let mut used = BindGroupStates::new();
2433
2434        let mut used_buffer_ranges = Vec::new();
2435        let mut used_texture_ranges = Vec::new();
2436        let mut hal_entries = Vec::with_capacity(desc.entries.len());
2437        let mut hal_buffers = Vec::new();
2438        let mut hal_samplers = Vec::new();
2439        let mut hal_textures = Vec::new();
2440        let mut hal_tlas_s = Vec::new();
2441        let snatch_guard = self.snatchable_lock.read();
2442        for entry in desc.entries.iter() {
2443            let binding = entry.binding;
2444            // Find the corresponding declaration in the layout
2445            let decl = layout
2446                .entries
2447                .get(binding)
2448                .ok_or(Error::MissingBindingDeclaration(binding))?;
2449            let (res_index, count) = match entry.resource {
2450                Br::Buffer(ref bb) => {
2451                    let bb = self.create_buffer_binding(
2452                        bb,
2453                        binding,
2454                        decl,
2455                        &mut used_buffer_ranges,
2456                        &mut dynamic_binding_info,
2457                        &mut late_buffer_binding_sizes,
2458                        &mut used,
2459                        &snatch_guard,
2460                    )?;
2461
2462                    let res_index = hal_buffers.len();
2463                    hal_buffers.push(bb);
2464                    (res_index, 1)
2465                }
2466                Br::BufferArray(ref bindings_array) => {
2467                    let num_bindings = bindings_array.len();
2468                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
2469
2470                    let res_index = hal_buffers.len();
2471                    for bb in bindings_array.iter() {
2472                        let bb = self.create_buffer_binding(
2473                            bb,
2474                            binding,
2475                            decl,
2476                            &mut used_buffer_ranges,
2477                            &mut dynamic_binding_info,
2478                            &mut late_buffer_binding_sizes,
2479                            &mut used,
2480                            &snatch_guard,
2481                        )?;
2482                        hal_buffers.push(bb);
2483                    }
2484                    (res_index, num_bindings)
2485                }
2486                Br::Sampler(ref sampler) => {
2487                    let sampler = self.create_sampler_binding(&mut used, binding, decl, sampler)?;
2488
2489                    let res_index = hal_samplers.len();
2490                    hal_samplers.push(sampler);
2491                    (res_index, 1)
2492                }
2493                Br::SamplerArray(ref samplers) => {
2494                    let num_bindings = samplers.len();
2495                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
2496
2497                    let res_index = hal_samplers.len();
2498                    for sampler in samplers.iter() {
2499                        let sampler =
2500                            self.create_sampler_binding(&mut used, binding, decl, sampler)?;
2501
2502                        hal_samplers.push(sampler);
2503                    }
2504
2505                    (res_index, num_bindings)
2506                }
2507                Br::TextureView(ref view) => {
2508                    let tb = self.create_texture_binding(
2509                        binding,
2510                        decl,
2511                        view,
2512                        &mut used,
2513                        &mut used_texture_ranges,
2514                        &snatch_guard,
2515                    )?;
2516                    let res_index = hal_textures.len();
2517                    hal_textures.push(tb);
2518                    (res_index, 1)
2519                }
2520                Br::TextureViewArray(ref views) => {
2521                    let num_bindings = views.len();
2522                    Self::check_array_binding(self.features, decl.count, num_bindings)?;
2523
2524                    let res_index = hal_textures.len();
2525                    for view in views.iter() {
2526                        let tb = self.create_texture_binding(
2527                            binding,
2528                            decl,
2529                            view,
2530                            &mut used,
2531                            &mut used_texture_ranges,
2532                            &snatch_guard,
2533                        )?;
2534
2535                        hal_textures.push(tb);
2536                    }
2537
2538                    (res_index, num_bindings)
2539                }
2540                Br::AccelerationStructure(ref tlas) => {
2541                    let tlas =
2542                        self.create_tlas_binding(&mut used, binding, decl, tlas, &snatch_guard)?;
2543                    let res_index = hal_tlas_s.len();
2544                    hal_tlas_s.push(tlas);
2545                    (res_index, 1)
2546                }
2547            };
2548
2549            hal_entries.push(hal::BindGroupEntry {
2550                binding,
2551                resource_index: res_index as u32,
2552                count: count as u32,
2553            });
2554        }
2555
2556        used.optimize();
2557
2558        hal_entries.sort_by_key(|entry| entry.binding);
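        // After sorting by binding index, duplicate bindings are adjacent, so comparing
        // each entry with its successor is enough to detect them.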
2559        for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) {
2560            if a.binding == b.binding {
2561                return Err(Error::DuplicateBinding(a.binding));
2562            }
2563        }
2564        let hal_desc = hal::BindGroupDescriptor {
2565            label: desc.label.to_hal(self.instance_flags),
2566            layout: layout.raw(),
2567            entries: &hal_entries,
2568            buffers: &hal_buffers,
2569            samplers: &hal_samplers,
2570            textures: &hal_textures,
2571            acceleration_structures: &hal_tlas_s,
2572        };
2573        let raw = unsafe { self.raw().create_bind_group(&hal_desc) }
2574            .map_err(|e| self.handle_hal_error(e))?;
2575
2576        // collect in the order of BGL iteration
2577        let late_buffer_binding_sizes = layout
2578            .entries
2579            .indices()
2580            .flat_map(|binding| late_buffer_binding_sizes.get(&binding).cloned())
2581            .collect();
2582
2583        let bind_group = BindGroup {
2584            raw: Snatchable::new(raw),
2585            device: self.clone(),
2586            layout,
2587            label: desc.label.to_string(),
2588            tracking_data: TrackingData::new(self.tracker_indices.bind_groups.clone()),
2589            used,
2590            used_buffer_ranges,
2591            used_texture_ranges,
2592            dynamic_binding_info,
2593            late_buffer_binding_sizes,
2594        };
2595
2596        let bind_group = Arc::new(bind_group);
2597
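        // Give every buffer and texture used by this bind group a weak reference back to
        // it, so those resources know which bind groups still reference them.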
2598        let weak_ref = Arc::downgrade(&bind_group);
2599        for range in &bind_group.used_texture_ranges {
2600            let mut bind_groups = range.texture.bind_groups.lock();
2601            bind_groups.push(weak_ref.clone());
2602        }
2603        for range in &bind_group.used_buffer_ranges {
2604            let mut bind_groups = range.buffer.bind_groups.lock();
2605            bind_groups.push(weak_ref.clone());
2606        }
2607
2608        Ok(bind_group)
2609    }
2610
2611    fn check_array_binding(
2612        features: wgt::Features,
2613        count: Option<NonZeroU32>,
2614        num_bindings: usize,
2615    ) -> Result<(), binding_model::CreateBindGroupError> {
2616        use super::binding_model::CreateBindGroupError as Error;
2617
2618        if let Some(count) = count {
2619            let count = count.get() as usize;
2620            if count < num_bindings {
2621                return Err(Error::BindingArrayPartialLengthMismatch {
2622                    actual: num_bindings,
2623                    expected: count,
2624                });
2625            }
2626            if count != num_bindings
2627                && !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY)
2628            {
2629                return Err(Error::BindingArrayLengthMismatch {
2630                    actual: num_bindings,
2631                    expected: count,
2632                });
2633            }
2634            if num_bindings == 0 {
2635                return Err(Error::BindingArrayZeroLength);
2636            }
2637        } else {
2638            return Err(Error::SingleBindingExpected);
2639        };
2640
2641        Ok(())
2642    }
2643
2644    fn texture_use_parameters(
2645        &self,
2646        binding: u32,
2647        decl: &wgt::BindGroupLayoutEntry,
2648        view: &TextureView,
2649        expected: &'static str,
2650    ) -> Result<wgt::TextureUses, binding_model::CreateBindGroupError> {
2651        use crate::binding_model::CreateBindGroupError as Error;
2652        if view
2653            .desc
2654            .aspects()
2655            .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL)
2656        {
2657            return Err(Error::DepthStencilAspect);
2658        }
2659        match decl.ty {
2660            wgt::BindingType::Texture {
2661                sample_type,
2662                view_dimension,
2663                multisampled,
2664            } => {
2665                use wgt::TextureSampleType as Tst;
2666                if multisampled != (view.samples != 1) {
2667                    return Err(Error::InvalidTextureMultisample {
2668                        binding,
2669                        layout_multisampled: multisampled,
2670                        view_samples: view.samples,
2671                    });
2672                }
2673                let compat_sample_type = view
2674                    .desc
2675                    .format
2676                    .sample_type(Some(view.desc.range.aspect), Some(self.features))
2677                    .unwrap();
2678                match (sample_type, compat_sample_type) {
2679                    (Tst::Uint, Tst::Uint) |
2680                        (Tst::Sint, Tst::Sint) |
2681                        (Tst::Depth, Tst::Depth) |
2682                        // if we expect non-filterable, accept anything float
2683                        (Tst::Float { filterable: false }, Tst::Float { .. }) |
2684                        // if we expect filterable, require it
2685                        (Tst::Float { filterable: true }, Tst::Float { filterable: true }) |
2686                        // if we expect non-filterable, also accept depth
2687                        (Tst::Float { filterable: false }, Tst::Depth) => {}
2688                    // if we expect filterable, also accept a Float format that is declared
2689                    // unfilterable when the view's format features report FILTERABLE (only
2690                    // possible when wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
2691                    // is enabled)
2692                    (Tst::Float { filterable: true }, Tst::Float { .. }) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {}
2693                    _ => {
2694                        return Err(Error::InvalidTextureSampleType {
2695                            binding,
2696                            layout_sample_type: sample_type,
2697                            view_format: view.desc.format,
2698                            view_sample_type: compat_sample_type,
2699                        })
2700                    }
2701                }
2702                if view_dimension != view.desc.dimension {
2703                    return Err(Error::InvalidTextureDimension {
2704                        binding,
2705                        layout_dimension: view_dimension,
2706                        view_dimension: view.desc.dimension,
2707                    });
2708                }
2709                view.check_usage(wgt::TextureUsages::TEXTURE_BINDING)?;
2710                Ok(wgt::TextureUses::RESOURCE)
2711            }
2712            wgt::BindingType::StorageTexture {
2713                access,
2714                format,
2715                view_dimension,
2716            } => {
2717                if format != view.desc.format {
2718                    return Err(Error::InvalidStorageTextureFormat {
2719                        binding,
2720                        layout_format: format,
2721                        view_format: view.desc.format,
2722                    });
2723                }
2724                if view_dimension != view.desc.dimension {
2725                    return Err(Error::InvalidTextureDimension {
2726                        binding,
2727                        layout_dimension: view_dimension,
2728                        view_dimension: view.desc.dimension,
2729                    });
2730                }
2731
2732                let mip_level_count = view.selector.mips.end - view.selector.mips.start;
2733                if mip_level_count != 1 {
2734                    return Err(Error::InvalidStorageTextureMipLevelCount {
2735                        binding,
2736                        mip_level_count,
2737                    });
2738                }
2739
2740                let internal_use = match access {
2741                    wgt::StorageTextureAccess::WriteOnly => {
2742                        if !view
2743                            .format_features
2744                            .flags
2745                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_WRITE_ONLY)
2746                        {
2747                            return Err(Error::StorageWriteNotSupported(view.desc.format));
2748                        }
2749                        wgt::TextureUses::STORAGE_WRITE_ONLY
2750                    }
2751                    wgt::StorageTextureAccess::ReadOnly => {
2752                        if !view
2753                            .format_features
2754                            .flags
2755                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_ONLY)
2756                        {
2757                            return Err(Error::StorageReadNotSupported(view.desc.format));
2758                        }
2759                        wgt::TextureUses::STORAGE_READ_ONLY
2760                    }
2761                    wgt::StorageTextureAccess::ReadWrite => {
2762                        if !view
2763                            .format_features
2764                            .flags
2765                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
2766                        {
2767                            return Err(Error::StorageReadWriteNotSupported(view.desc.format));
2768                        }
2769
2770                        wgt::TextureUses::STORAGE_READ_WRITE
2771                    }
2772                    wgt::StorageTextureAccess::Atomic => {
2773                        if !view
2774                            .format_features
2775                            .flags
2776                            .contains(wgt::TextureFormatFeatureFlags::STORAGE_ATOMIC)
2777                        {
2778                            return Err(Error::StorageAtomicNotSupported(view.desc.format));
2779                        }
2780
2781                        wgt::TextureUses::STORAGE_ATOMIC
2782                    }
2783                };
2784                view.check_usage(wgt::TextureUsages::STORAGE_BINDING)?;
2785                Ok(internal_use)
2786            }
2787            wgt::BindingType::ExternalTexture => {
2788                if view.desc.dimension != TextureViewDimension::D2 {
2789                    return Err(Error::InvalidTextureDimension {
2790                        binding,
2791                        layout_dimension: TextureViewDimension::D2,
2792                        view_dimension: view.desc.dimension,
2793                    });
2794                }
2795                let mip_level_count = view.selector.mips.end - view.selector.mips.start;
2796                if mip_level_count != 1 {
2797                    return Err(Error::InvalidExternalTextureMipLevelCount {
2798                        binding,
2799                        mip_level_count,
2800                    });
2801                }
2802                if view.desc.format != TextureFormat::Rgba8Unorm
2803                    && view.desc.format != TextureFormat::Bgra8Unorm
2804                    && view.desc.format != TextureFormat::Rgba16Float
2805                {
2806                    return Err(Error::InvalidExternalTextureFormat {
2807                        binding,
2808                        format: view.desc.format,
2809                    });
2810                }
2811                if view.samples != 1 {
2812                    return Err(Error::InvalidTextureMultisample {
2813                        binding,
2814                        layout_multisampled: false,
2815                        view_samples: view.samples,
2816                    });
2817                }
2818
2819                view.check_usage(wgt::TextureUsages::TEXTURE_BINDING)?;
2820                Ok(wgt::TextureUses::RESOURCE)
2821            }
2822            _ => Err(Error::WrongBindingType {
2823                binding,
2824                actual: decl.ty,
2825                expected,
2826            }),
2827        }
2828    }
2829
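    /// Creates a pipeline layout from already-resolved bind group layouts,
    /// validating the group count, push constant ranges, and per-type binding
    /// limits before creating the HAL pipeline layout.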
2830    pub(crate) fn create_pipeline_layout(
2831        self: &Arc<Self>,
2832        desc: &binding_model::ResolvedPipelineLayoutDescriptor,
2833    ) -> Result<Arc<binding_model::PipelineLayout>, binding_model::CreatePipelineLayoutError> {
2834        use crate::binding_model::CreatePipelineLayoutError as Error;
2835
2836        self.check_is_valid()?;
2837
2838        let bind_group_layouts_count = desc.bind_group_layouts.len();
2839        let device_max_bind_groups = self.limits.max_bind_groups as usize;
2840        if bind_group_layouts_count > device_max_bind_groups {
2841            return Err(Error::TooManyGroups {
2842                actual: bind_group_layouts_count,
2843                max: device_max_bind_groups,
2844            });
2845        }
2846
2847        if !desc.push_constant_ranges.is_empty() {
2848            self.require_features(wgt::Features::PUSH_CONSTANTS)?;
2849        }
2850
2851        let mut used_stages = wgt::ShaderStages::empty();
2852        for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
2853            if pc.stages.intersects(used_stages) {
2854                return Err(Error::MoreThanOnePushConstantRangePerStage {
2855                    index,
2856                    provided: pc.stages,
2857                    intersected: pc.stages & used_stages,
2858                });
2859            }
2860            used_stages |= pc.stages;
2861
2862            let device_max_pc_size = self.limits.max_push_constant_size;
2863            if device_max_pc_size < pc.range.end {
2864                return Err(Error::PushConstantRangeTooLarge {
2865                    index,
2866                    range: pc.range.clone(),
2867                    max: device_max_pc_size,
2868                });
2869            }
2870
2871            if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
2872                return Err(Error::MisalignedPushConstantRange {
2873                    index,
2874                    bound: pc.range.start,
2875                });
2876            }
2877            if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
2878                return Err(Error::MisalignedPushConstantRange {
2879                    index,
2880                    bound: pc.range.end,
2881                });
2882            }
2883        }
2884
2885        let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
2886
2887        for bgl in desc.bind_group_layouts.iter() {
2888            bgl.same_device(self)?;
2889            count_validator.merge(&bgl.binding_count_validator);
2890        }
2891
2892        count_validator
2893            .validate(&self.limits)
2894            .map_err(Error::TooManyBindings)?;
2895
2896        let bind_group_layouts = desc
2897            .bind_group_layouts
2898            .iter()
2899            .cloned()
2900            .collect::<ArrayVec<_, { hal::MAX_BIND_GROUPS }>>();
2901
2902        let raw_bind_group_layouts = desc
2903            .bind_group_layouts
2904            .iter()
2905            .map(|bgl| bgl.raw())
2906            .collect::<ArrayVec<_, { hal::MAX_BIND_GROUPS }>>();
2907
2908        let additional_flags = if self.indirect_validation.is_some() {
2909            hal::PipelineLayoutFlags::INDIRECT_BUILTIN_UPDATE
2910        } else {
2911            hal::PipelineLayoutFlags::empty()
2912        };
2913
2914        let hal_desc = hal::PipelineLayoutDescriptor {
2915            label: desc.label.to_hal(self.instance_flags),
2916            flags: hal::PipelineLayoutFlags::FIRST_VERTEX_INSTANCE
2917                | hal::PipelineLayoutFlags::NUM_WORK_GROUPS
2918                | additional_flags,
2919            bind_group_layouts: &raw_bind_group_layouts,
2920            push_constant_ranges: desc.push_constant_ranges.as_ref(),
2921        };
2922
2923        let raw = unsafe { self.raw().create_pipeline_layout(&hal_desc) }
2924            .map_err(|e| self.handle_hal_error(e))?;
2925
2926        drop(raw_bind_group_layouts);
2927
2928        let layout = binding_model::PipelineLayout {
2929            raw: ManuallyDrop::new(raw),
2930            device: self.clone(),
2931            label: desc.label.to_string(),
2932            bind_group_layouts,
2933            push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
2934        };
2935
2936        let layout = Arc::new(layout);
2937
2938        Ok(layout)
2939    }
2940
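    /// Builds an implicit pipeline layout from bind group layout entry maps
    /// gathered by shader reflection, dropping trailing empty groups and
    /// deduplicating identical layouts.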
2941    pub(crate) fn derive_pipeline_layout(
2942        self: &Arc<Self>,
2943        mut derived_group_layouts: Box<ArrayVec<bgl::EntryMap, { hal::MAX_BIND_GROUPS }>>,
2944    ) -> Result<Arc<binding_model::PipelineLayout>, pipeline::ImplicitLayoutError> {
2945        while derived_group_layouts
2946            .last()
2947            .is_some_and(|map| map.is_empty())
2948        {
2949            derived_group_layouts.pop();
2950        }
2951
2952        let mut unique_bind_group_layouts = FastHashMap::default();
2953
2954        let bind_group_layouts = derived_group_layouts
2955            .into_iter()
2956            .map(|mut bgl_entry_map| {
2957                bgl_entry_map.sort();
2958                match unique_bind_group_layouts.entry(bgl_entry_map) {
2959                    hashbrown::hash_map::Entry::Occupied(v) => Ok(Arc::clone(v.get())),
2960                    hashbrown::hash_map::Entry::Vacant(e) => {
2961                        match self.create_bind_group_layout(
2962                            &None,
2963                            e.key().clone(),
2964                            bgl::Origin::Derived,
2965                        ) {
2966                            Ok(bgl) => {
2967                                e.insert(bgl.clone());
2968                                Ok(bgl)
2969                            }
2970                            Err(e) => Err(e),
2971                        }
2972                    }
2973                }
2974            })
2975            .collect::<Result<Vec<_>, _>>()?;
2976
2977        let layout_desc = binding_model::ResolvedPipelineLayoutDescriptor {
2978            label: None,
2979            bind_group_layouts: Cow::Owned(bind_group_layouts),
2980            push_constant_ranges: Cow::Borrowed(&[]), //TODO?
2981        };
2982
2983        let layout = self.create_pipeline_layout(&layout_desc)?;
2984        Ok(layout)
2985    }
2986
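    /// Creates a compute pipeline, validating the shader's bindings against the
    /// provided pipeline layout or deriving an implicit layout from reflection
    /// when none is given.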
2987    pub(crate) fn create_compute_pipeline(
2988        self: &Arc<Self>,
2989        desc: pipeline::ResolvedComputePipelineDescriptor,
2990    ) -> Result<Arc<pipeline::ComputePipeline>, pipeline::CreateComputePipelineError> {
2991        self.check_is_valid()?;
2992
2993        self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;
2994
2995        let shader_module = desc.stage.module;
2996
2997        shader_module.same_device(self)?;
2998
2999        let is_auto_layout = desc.layout.is_none();
3000
3001        // Get the pipeline layout from the desc if it is provided.
3002        let pipeline_layout = match desc.layout {
3003            Some(pipeline_layout) => {
3004                pipeline_layout.same_device(self)?;
3005                Some(pipeline_layout)
3006            }
3007            None => None,
3008        };
3009
3010        let mut binding_layout_source = match pipeline_layout {
3011            Some(ref pipeline_layout) => {
3012                validation::BindingLayoutSource::Provided(pipeline_layout.get_binding_maps())
3013            }
3014            None => validation::BindingLayoutSource::new_derived(&self.limits),
3015        };
3016        let mut shader_binding_sizes = FastHashMap::default();
3017        let io = validation::StageIo::default();
3018
3019        let final_entry_point_name;
3020
3021        {
3022            let stage = wgt::ShaderStages::COMPUTE;
3023
3024            final_entry_point_name = shader_module.finalize_entry_point_name(
3025                stage,
3026                desc.stage.entry_point.as_ref().map(|ep| ep.as_ref()),
3027            )?;
3028
3029            if let Some(ref interface) = shader_module.interface {
3030                let _ = interface.check_stage(
3031                    &mut binding_layout_source,
3032                    &mut shader_binding_sizes,
3033                    &final_entry_point_name,
3034                    stage,
3035                    io,
3036                    None,
3037                )?;
3038            }
3039        }
3040
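        // If a layout was provided, the shader bindings were validated against it
        // above; otherwise derive an implicit layout from the reflected entries.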
3041        let pipeline_layout = match binding_layout_source {
3042            validation::BindingLayoutSource::Provided(_) => {
3043                drop(binding_layout_source);
3044                pipeline_layout.unwrap()
3045            }
3046            validation::BindingLayoutSource::Derived(entries) => {
3047                self.derive_pipeline_layout(entries)?
3048            }
3049        };
3050
3051        let late_sized_buffer_groups =
3052            Device::make_late_sized_buffer_groups(&shader_binding_sizes, &pipeline_layout);
3053
3054        let cache = match desc.cache {
3055            Some(cache) => {
3056                cache.same_device(self)?;
3057                Some(cache)
3058            }
3059            None => None,
3060        };
3061
3062        let pipeline_desc = hal::ComputePipelineDescriptor {
3063            label: desc.label.to_hal(self.instance_flags),
3064            layout: pipeline_layout.raw(),
3065            stage: hal::ProgrammableStage {
3066                module: shader_module.raw(),
3067                entry_point: final_entry_point_name.as_ref(),
3068                constants: &desc.stage.constants,
3069                zero_initialize_workgroup_memory: desc.stage.zero_initialize_workgroup_memory,
3070            },
3071            cache: cache.as_ref().map(|it| it.raw()),
3072        };
3073
3074        let raw =
3075            unsafe { self.raw().create_compute_pipeline(&pipeline_desc) }.map_err(
3076                |err| match err {
3077                    hal::PipelineError::Device(error) => {
3078                        pipeline::CreateComputePipelineError::Device(self.handle_hal_error(error))
3079                    }
3080                    hal::PipelineError::Linkage(_stages, msg) => {
3081                        pipeline::CreateComputePipelineError::Internal(msg)
3082                    }
3083                    hal::PipelineError::EntryPoint(_stage) => {
3084                        pipeline::CreateComputePipelineError::Internal(
3085                            ENTRYPOINT_FAILURE_ERROR.to_string(),
3086                        )
3087                    }
3088                    hal::PipelineError::PipelineConstants(_stages, msg) => {
3089                        pipeline::CreateComputePipelineError::PipelineConstants(msg)
3090                    }
3091                },
3092            )?;
3093
3094        let pipeline = pipeline::ComputePipeline {
3095            raw: ManuallyDrop::new(raw),
3096            layout: pipeline_layout,
3097            device: self.clone(),
3098            _shader_module: shader_module,
3099            late_sized_buffer_groups,
3100            label: desc.label.to_string(),
3101            tracking_data: TrackingData::new(self.tracker_indices.compute_pipelines.clone()),
3102        };
3103
3104        let pipeline = Arc::new(pipeline);
3105
3106        if is_auto_layout {
3107            for bgl in pipeline.layout.bind_group_layouts.iter() {
3108                // `bind_group_layouts` might contain duplicate entries, so we need to ignore the result.
3109                let _ = bgl
3110                    .exclusive_pipeline
3111                    .set(binding_model::ExclusivePipeline::Compute(Arc::downgrade(
3112                        &pipeline,
3113                    )));
3114            }
3115        }
3116
3117        Ok(pipeline)
3118    }
3119
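    /// Creates a render pipeline, validating vertex buffer layouts, color and
    /// depth/stencil targets, and shader interfaces against the provided or
    /// derived pipeline layout.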
3120    pub(crate) fn create_render_pipeline(
3121        self: &Arc<Self>,
3122        desc: pipeline::ResolvedRenderPipelineDescriptor,
3123    ) -> Result<Arc<pipeline::RenderPipeline>, pipeline::CreateRenderPipelineError> {
3124        use wgt::TextureFormatFeatureFlags as Tfff;
3125
3126        self.check_is_valid()?;
3127
3128        let mut shader_binding_sizes = FastHashMap::default();
3129
3130        let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
3131        let max_attachments = self.limits.max_color_attachments as usize;
3132        if num_attachments > max_attachments {
3133            return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
3134                command::ColorAttachmentError::TooMany {
3135                    given: num_attachments,
3136                    limit: max_attachments,
3137                },
3138            ));
3139        }
3140
3141        let color_targets = desc
3142            .fragment
3143            .as_ref()
3144            .map_or(&[][..], |fragment| &fragment.targets);
3145        let depth_stencil_state = desc.depth_stencil.as_ref();
3146
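        // Using different blend states or write masks across color targets
        // requires the INDEPENDENT_BLEND downlevel flag.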
3147        {
3148            let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> =
3149                color_targets.iter().filter_map(|x| x.as_ref()).collect();
3150            if !cts.is_empty() && {
3151                let first = &cts[0];
3152                cts[1..]
3153                    .iter()
3154                    .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend)
3155            } {
3156                self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?;
3157            }
3158        }
3159
3160        let mut io = validation::StageIo::default();
3161        let mut validated_stages = wgt::ShaderStages::empty();
3162
3163        let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len());
3164        let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len());
3165        let mut total_attributes = 0;
3166        let mut shader_expects_dual_source_blending = false;
3167        let mut pipeline_expects_dual_source_blending = false;
3168        for (i, vb_state) in desc.vertex.buffers.iter().enumerate() {
3169            // https://gpuweb.github.io/gpuweb/#abstract-opdef-validating-gpuvertexbufferlayout
3170
3171            if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 {
3172                return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge {
3173                    index: i as u32,
3174                    given: vb_state.array_stride as u32,
3175                    limit: self.limits.max_vertex_buffer_array_stride,
3176                });
3177            }
3178            if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
3179                return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
3180                    index: i as u32,
3181                    stride: vb_state.array_stride,
3182                });
3183            }
3184
3185            let max_stride = if vb_state.array_stride == 0 {
3186                self.limits.max_vertex_buffer_array_stride as u64
3187            } else {
3188                vb_state.array_stride
3189            };
3190            let mut last_stride = 0;
3191            for attribute in vb_state.attributes.iter() {
3192                let attribute_stride = attribute.offset + attribute.format.size();
3193                if attribute_stride > max_stride {
3194                    return Err(
3195                        pipeline::CreateRenderPipelineError::VertexAttributeStrideTooLarge {
3196                            location: attribute.shader_location,
3197                            given: attribute_stride as u32,
3198                            limit: max_stride as u32,
3199                        },
3200                    );
3201                }
3202
3203                let required_offset_alignment = attribute.format.size().min(4);
3204                if attribute.offset % required_offset_alignment != 0 {
3205                    return Err(
3206                        pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
3207                            location: attribute.shader_location,
3208                            offset: attribute.offset,
3209                        },
3210                    );
3211                }
3212
3213                if attribute.shader_location >= self.limits.max_vertex_attributes {
3214                    return Err(
3215                        pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
3216                            given: attribute.shader_location,
3217                            limit: self.limits.max_vertex_attributes,
3218                        },
3219                    );
3220                }
3221
3222                last_stride = last_stride.max(attribute_stride);
3223            }
3224            vertex_steps.push(pipeline::VertexStep {
3225                stride: vb_state.array_stride,
3226                last_stride,
3227                mode: vb_state.step_mode,
3228            });
3229            if vb_state.attributes.is_empty() {
3230                continue;
3231            }
3232            vertex_buffers.push(hal::VertexBufferLayout {
3233                array_stride: vb_state.array_stride,
3234                step_mode: vb_state.step_mode,
3235                attributes: vb_state.attributes.as_ref(),
3236            });
3237
3238            for attribute in vb_state.attributes.iter() {
3239                if attribute.offset >= 0x10000000 {
3240                    return Err(
3241                        pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
3242                            location: attribute.shader_location,
3243                            offset: attribute.offset,
3244                        },
3245                    );
3246                }
3247
3248                if let wgt::VertexFormat::Float64
3249                | wgt::VertexFormat::Float64x2
3250                | wgt::VertexFormat::Float64x3
3251                | wgt::VertexFormat::Float64x4 = attribute.format
3252                {
3253                    self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?;
3254                }
3255
3256                let previous = io.insert(
3257                    attribute.shader_location,
3258                    validation::InterfaceVar::vertex_attribute(attribute.format),
3259                );
3260
3261                if previous.is_some() {
3262                    return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash(
3263                        attribute.shader_location,
3264                    ));
3265                }
3266            }
3267            total_attributes += vb_state.attributes.len();
3268        }
3269
3270        if vertex_buffers.len() > self.limits.max_vertex_buffers as usize {
3271            return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers {
3272                given: vertex_buffers.len() as u32,
3273                limit: self.limits.max_vertex_buffers,
3274            });
3275        }
3276        if total_attributes > self.limits.max_vertex_attributes as usize {
3277            return Err(
3278                pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
3279                    given: total_attributes as u32,
3280                    limit: self.limits.max_vertex_attributes,
3281                },
3282            );
3283        }
3284
3285        if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() {
3286            return Err(
3287                pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology {
3288                    strip_index_format: desc.primitive.strip_index_format,
3289                    topology: desc.primitive.topology,
3290                },
3291            );
3292        }
3293
3294        if desc.primitive.unclipped_depth {
3295            self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?;
3296        }
3297
3298        if desc.primitive.polygon_mode == wgt::PolygonMode::Line {
3299            self.require_features(wgt::Features::POLYGON_MODE_LINE)?;
3300        }
3301        if desc.primitive.polygon_mode == wgt::PolygonMode::Point {
3302            self.require_features(wgt::Features::POLYGON_MODE_POINT)?;
3303        }
3304
3305        if desc.primitive.conservative {
3306            self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?;
3307        }
3308
3309        if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
3310            return Err(
3311                pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode,
3312            );
3313        }
3314
3315        let mut target_specified = false;
3316
3317        for (i, cs) in color_targets.iter().enumerate() {
3318            if let Some(cs) = cs.as_ref() {
3319                target_specified = true;
3320                let error = 'error: {
3321                    if cs.write_mask.contains_unknown_bits() {
3322                        break 'error Some(pipeline::ColorStateError::InvalidWriteMask(
3323                            cs.write_mask,
3324                        ));
3325                    }
3326
3327                    let format_features = self.describe_format_features(cs.format)?;
3328                    if !format_features
3329                        .allowed_usages
3330                        .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
3331                    {
3332                        break 'error Some(pipeline::ColorStateError::FormatNotRenderable(
3333                            cs.format,
3334                        ));
3335                    }
3336                    let blendable = format_features.flags.contains(Tfff::BLENDABLE);
3337                    let filterable = format_features.flags.contains(Tfff::FILTERABLE);
3338                    let adapter_specific = self
3339                        .features
3340                        .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
3341                    // According to the WebGPU specification, the texture format must be
3342                    // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is enabled; use
3343                    // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to work around
3344                    // this limitation.
3345                    if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) {
3346                        break 'error Some(pipeline::ColorStateError::FormatNotBlendable(
3347                            cs.format,
3348                        ));
3349                    }
3350                    if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) {
3351                        break 'error Some(pipeline::ColorStateError::FormatNotColor(cs.format));
3352                    }
3353
3354                    if desc.multisample.count > 1
3355                        && !format_features
3356                            .flags
3357                            .sample_count_supported(desc.multisample.count)
3358                    {
3359                        break 'error Some(pipeline::ColorStateError::InvalidSampleCount(
3360                            desc.multisample.count,
3361                            cs.format,
3362                            cs.format
3363                                .guaranteed_format_features(self.features)
3364                                .flags
3365                                .supported_sample_counts(),
3366                            self.adapter
3367                                .get_texture_format_features(cs.format)
3368                                .flags
3369                                .supported_sample_counts(),
3370                        ));
3371                    }
3372
3373                    if let Some(blend_mode) = cs.blend {
3374                        for factor in [
3375                            blend_mode.color.src_factor,
3376                            blend_mode.color.dst_factor,
3377                            blend_mode.alpha.src_factor,
3378                            blend_mode.alpha.dst_factor,
3379                        ] {
3380                            if factor.ref_second_blend_source() {
3381                                self.require_features(wgt::Features::DUAL_SOURCE_BLENDING)?;
3382                                if i == 0 {
3383                                    pipeline_expects_dual_source_blending = true;
3384                                    break;
3385                                } else {
3386                                    return Err(pipeline::CreateRenderPipelineError
3387                                        ::BlendFactorOnUnsupportedTarget { factor, target: i as u32 });
3388                                }
3389                            }
3390                        }
3391                    }
3392
3393                    break 'error None;
3394                };
3395                if let Some(e) = error {
3396                    return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e));
3397                }
3398            }
3399        }
3400
3401        let limit = self.limits.max_color_attachment_bytes_per_sample;
3402        let formats = color_targets
3403            .iter()
3404            .map(|cs| cs.as_ref().map(|cs| cs.format));
3405        if let Err(total) = validate_color_attachment_bytes_per_sample(formats, limit) {
3406            return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
3407                command::ColorAttachmentError::TooManyBytesPerSample { total, limit },
3408            ));
3409        }
3410
3411        if let Some(ds) = depth_stencil_state {
3412            target_specified = true;
3413            let error = 'error: {
3414                let format_features = self.describe_format_features(ds.format)?;
3415                if !format_features
3416                    .allowed_usages
3417                    .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
3418                {
3419                    break 'error Some(pipeline::DepthStencilStateError::FormatNotRenderable(
3420                        ds.format,
3421                    ));
3422                }
3423
3424                let aspect = hal::FormatAspects::from(ds.format);
3425                if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) {
3426                    break 'error Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format));
3427                }
3428                if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) {
3429                    break 'error Some(pipeline::DepthStencilStateError::FormatNotStencil(
3430                        ds.format,
3431                    ));
3432                }
3433                if desc.multisample.count > 1
3434                    && !format_features
3435                        .flags
3436                        .sample_count_supported(desc.multisample.count)
3437                {
3438                    break 'error Some(pipeline::DepthStencilStateError::InvalidSampleCount(
3439                        desc.multisample.count,
3440                        ds.format,
3441                        ds.format
3442                            .guaranteed_format_features(self.features)
3443                            .flags
3444                            .supported_sample_counts(),
3445                        self.adapter
3446                            .get_texture_format_features(ds.format)
3447                            .flags
3448                            .supported_sample_counts(),
3449                    ));
3450                }
3451
3452                break 'error None;
3453            };
3454            if let Some(e) = error {
3455                return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e));
3456            }
3457
3458            if ds.bias.clamp != 0.0 {
3459                self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?;
3460            }
3461        }
3462
3463        if !target_specified {
3464            return Err(pipeline::CreateRenderPipelineError::NoTargetSpecified);
3465        }
3466
3467        let is_auto_layout = desc.layout.is_none();
3468
3469        // Get the pipeline layout from the desc if it is provided.
3470        let pipeline_layout = match desc.layout {
3471            Some(pipeline_layout) => {
3472                pipeline_layout.same_device(self)?;
3473                Some(pipeline_layout)
3474            }
3475            None => None,
3476        };
3477
3478        let mut binding_layout_source = match pipeline_layout {
3479            Some(ref pipeline_layout) => {
3480                validation::BindingLayoutSource::Provided(pipeline_layout.get_binding_maps())
3481            }
3482            None => validation::BindingLayoutSource::new_derived(&self.limits),
3483        };
3484
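        // The multisample count must be a nonzero power of two no greater than 32.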
3485        let samples = {
3486            let sc = desc.multisample.count;
3487            if sc == 0 || sc > 32 || !sc.is_power_of_two() {
3488                return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
3489            }
3490            sc
3491        };
3492
3493        let vertex_entry_point_name;
3494        let vertex_stage = {
3495            let stage_desc = &desc.vertex.stage;
3496            let stage = wgt::ShaderStages::VERTEX;
3497
3498            let vertex_shader_module = &stage_desc.module;
3499            vertex_shader_module.same_device(self)?;
3500
3501            let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
3502
3503            vertex_entry_point_name = vertex_shader_module
3504                .finalize_entry_point_name(
3505                    stage,
3506                    stage_desc.entry_point.as_ref().map(|ep| ep.as_ref()),
3507                )
3508                .map_err(stage_err)?;
3509
3510            if let Some(ref interface) = vertex_shader_module.interface {
3511                io = interface
3512                    .check_stage(
3513                        &mut binding_layout_source,
3514                        &mut shader_binding_sizes,
3515                        &vertex_entry_point_name,
3516                        stage,
3517                        io,
3518                        desc.depth_stencil.as_ref().map(|d| d.depth_compare),
3519                    )
3520                    .map_err(stage_err)?;
3521                validated_stages |= stage;
3522            }
3523
3524            hal::ProgrammableStage {
3525                module: vertex_shader_module.raw(),
3526                entry_point: &vertex_entry_point_name,
3527                constants: &stage_desc.constants,
3528                zero_initialize_workgroup_memory: stage_desc.zero_initialize_workgroup_memory,
3529            }
3530        };
3531
3532        let fragment_entry_point_name;
3533        let fragment_stage = match desc.fragment {
3534            Some(ref fragment_state) => {
3535                let stage = wgt::ShaderStages::FRAGMENT;
3536
3537                let shader_module = &fragment_state.stage.module;
3538                shader_module.same_device(self)?;
3539
3540                let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
3541
3542                fragment_entry_point_name = shader_module
3543                    .finalize_entry_point_name(
3544                        stage,
3545                        fragment_state
3546                            .stage
3547                            .entry_point
3548                            .as_ref()
3549                            .map(|ep| ep.as_ref()),
3550                    )
3551                    .map_err(stage_err)?;
3552
3553                if validated_stages == wgt::ShaderStages::VERTEX {
3554                    if let Some(ref interface) = shader_module.interface {
3555                        io = interface
3556                            .check_stage(
3557                                &mut binding_layout_source,
3558                                &mut shader_binding_sizes,
3559                                &fragment_entry_point_name,
3560                                stage,
3561                                io,
3562                                desc.depth_stencil.as_ref().map(|d| d.depth_compare),
3563                            )
3564                            .map_err(stage_err)?;
3565                        validated_stages |= stage;
3566                    }
3567                }
3568
3569                if let Some(ref interface) = shader_module.interface {
3570                    shader_expects_dual_source_blending = interface
3571                        .fragment_uses_dual_source_blending(&fragment_entry_point_name)
3572                        .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
3573                            stage,
3574                            error,
3575                        })?;
3576                }
3577
3578                Some(hal::ProgrammableStage {
3579                    module: shader_module.raw(),
3580                    entry_point: &fragment_entry_point_name,
3581                    constants: &fragment_state.stage.constants,
3582                    zero_initialize_workgroup_memory: fragment_state
3583                        .stage
3584                        .zero_initialize_workgroup_memory,
3585                })
3586            }
3587            None => None,
3588        };
3589
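        // The fragment shader and the pipeline's blend state must agree on
        // whether dual-source blending is used.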
3590        if !pipeline_expects_dual_source_blending && shader_expects_dual_source_blending {
3591            return Err(
3592                pipeline::CreateRenderPipelineError::ShaderExpectsPipelineToUseDualSourceBlending,
3593            );
3594        }
3595        if pipeline_expects_dual_source_blending && !shader_expects_dual_source_blending {
3596            return Err(
3597                pipeline::CreateRenderPipelineError::PipelineExpectsShaderToUseDualSourceBlending,
3598            );
3599        }
3600
3601        if validated_stages.contains(wgt::ShaderStages::FRAGMENT) {
3602            for (i, output) in io.iter() {
3603                match color_targets.get(*i as usize) {
3604                    Some(Some(state)) => {
3605                        validation::check_texture_format(state.format, &output.ty).map_err(
3606                            |pipeline| {
3607                                pipeline::CreateRenderPipelineError::ColorState(
3608                                    *i as u8,
3609                                    pipeline::ColorStateError::IncompatibleFormat {
3610                                        pipeline,
3611                                        shader: output.ty,
3612                                    },
3613                                )
3614                            },
3615                        )?;
3616                    }
3617                    _ => {
3618                        log::warn!(
3619                            "The fragment stage {:?} output @location({}) values are ignored",
3620                            fragment_stage
3621                                .as_ref()
3622                                .map_or("", |stage| stage.entry_point),
3623                            i
3624                        );
3625                    }
3626                }
3627            }
3628        }
3629        let last_stage = match desc.fragment {
3630            Some(_) => wgt::ShaderStages::FRAGMENT,
3631            None => wgt::ShaderStages::VERTEX,
3632        };
3633        if is_auto_layout && !validated_stages.contains(last_stage) {
3634            return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
3635        }
3636
3637        let pipeline_layout = match binding_layout_source {
3638            validation::BindingLayoutSource::Provided(_) => {
3639                drop(binding_layout_source);
3640                pipeline_layout.unwrap()
3641            }
3642            validation::BindingLayoutSource::Derived(entries) => {
3643                self.derive_pipeline_layout(entries)?
3644            }
3645        };
3646
3647        // Multiview is only supported if the feature is enabled
3648        if desc.multiview.is_some() {
3649            self.require_features(wgt::Features::MULTIVIEW)?;
3650        }
3651
3652        if !self
3653            .downlevel
3654            .flags
3655            .contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED)
3656        {
3657            for (binding, size) in shader_binding_sizes.iter() {
3658                if size.get() % 16 != 0 {
3659                    return Err(pipeline::CreateRenderPipelineError::UnalignedShader {
3660                        binding: binding.binding,
3661                        group: binding.group,
3662                        size: size.get(),
3663                    });
3664                }
3665            }
3666        }
3667
3668        let late_sized_buffer_groups =
3669            Device::make_late_sized_buffer_groups(&shader_binding_sizes, &pipeline_layout);
3670
3671        let cache = match desc.cache {
3672            Some(cache) => {
3673                cache.same_device(self)?;
3674                Some(cache)
3675            }
3676            None => None,
3677        };
3678
3679        let pipeline_desc = hal::RenderPipelineDescriptor {
3680            label: desc.label.to_hal(self.instance_flags),
3681            layout: pipeline_layout.raw(),
3682            vertex_buffers: &vertex_buffers,
3683            vertex_stage,
3684            primitive: desc.primitive,
3685            depth_stencil: desc.depth_stencil.clone(),
3686            multisample: desc.multisample,
3687            fragment_stage,
3688            color_targets,
3689            multiview: desc.multiview,
3690            cache: cache.as_ref().map(|it| it.raw()),
3691        };
3692        let raw =
3693            unsafe { self.raw().create_render_pipeline(&pipeline_desc) }.map_err(
3694                |err| match err {
3695                    hal::PipelineError::Device(error) => {
3696                        pipeline::CreateRenderPipelineError::Device(self.handle_hal_error(error))
3697                    }
3698                    hal::PipelineError::Linkage(stage, msg) => {
3699                        pipeline::CreateRenderPipelineError::Internal { stage, error: msg }
3700                    }
3701                    hal::PipelineError::EntryPoint(stage) => {
3702                        pipeline::CreateRenderPipelineError::Internal {
3703                            stage: hal::auxil::map_naga_stage(stage),
3704                            error: ENTRYPOINT_FAILURE_ERROR.to_string(),
3705                        }
3706                    }
3707                    hal::PipelineError::PipelineConstants(stage, error) => {
3708                        pipeline::CreateRenderPipelineError::PipelineConstants { stage, error }
3709                    }
3710                },
3711            )?;
3712
3713        let pass_context = RenderPassContext {
3714            attachments: AttachmentData {
3715                colors: color_targets
3716                    .iter()
3717                    .map(|state| state.as_ref().map(|s| s.format))
3718                    .collect(),
3719                resolves: ArrayVec::new(),
3720                depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
3721            },
3722            sample_count: samples,
3723            multiview: desc.multiview,
3724        };
3725
3726        let mut flags = pipeline::PipelineFlags::empty();
3727        for state in color_targets.iter().filter_map(|s| s.as_ref()) {
3728            if let Some(ref bs) = state.blend {
3729                if bs.color.uses_constant() | bs.alpha.uses_constant() {
3730                    flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
3731                }
3732            }
3733        }
3734        if let Some(ds) = depth_stencil_state.as_ref() {
3735            if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
3736                flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
3737            }
3738            if !ds.is_depth_read_only() {
3739                flags |= pipeline::PipelineFlags::WRITES_DEPTH;
3740            }
3741            if !ds.is_stencil_read_only(desc.primitive.cull_mode) {
3742                flags |= pipeline::PipelineFlags::WRITES_STENCIL;
3743            }
3744        }
3745
3746        let shader_modules = {
3747            let mut shader_modules = ArrayVec::new();
3748            shader_modules.push(desc.vertex.stage.module);
3749            shader_modules.extend(desc.fragment.map(|f| f.stage.module));
3750            shader_modules
3751        };
3752
3753        let pipeline = pipeline::RenderPipeline {
3754            raw: ManuallyDrop::new(raw),
3755            layout: pipeline_layout,
3756            device: self.clone(),
3757            pass_context,
3758            _shader_modules: shader_modules,
3759            flags,
3760            strip_index_format: desc.primitive.strip_index_format,
3761            vertex_steps,
3762            late_sized_buffer_groups,
3763            label: desc.label.to_string(),
3764            tracking_data: TrackingData::new(self.tracker_indices.render_pipelines.clone()),
3765        };
3766
3767        let pipeline = Arc::new(pipeline);
3768
3769        if is_auto_layout {
3770            for bgl in pipeline.layout.bind_group_layouts.iter() {
3771                // `bind_group_layouts` might contain duplicate entries, so we need to ignore the result.
3772                let _ = bgl
3773                    .exclusive_pipeline
3774                    .set(binding_model::ExclusivePipeline::Render(Arc::downgrade(
3775                        &pipeline,
3776                    )));
3777            }
3778        }
3779
3780        Ok(pipeline)
3781    }
3782
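    /// Creates a pipeline cache, optionally seeded with previously exported
    /// cache data supplied in `desc.data`.
    ///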
3783    /// # Safety
3784    /// The `data` field on `desc` must have previously been returned from [`crate::global::Global::pipeline_cache_get_data`]
3785    pub unsafe fn create_pipeline_cache(
3786        self: &Arc<Self>,
3787        desc: &pipeline::PipelineCacheDescriptor,
3788    ) -> Result<Arc<pipeline::PipelineCache>, pipeline::CreatePipelineCacheError> {
3789        use crate::pipeline_cache;
3790
3791        self.check_is_valid()?;
3792
3793        self.require_features(wgt::Features::PIPELINE_CACHE)?;
3794        let data = if let Some((data, validation_key)) = desc
3795            .data
3796            .as_ref()
3797            .zip(self.raw().pipeline_cache_validation_key())
3798        {
3799            let data = pipeline_cache::validate_pipeline_cache(
3800                data,
3801                &self.adapter.raw.info,
3802                validation_key,
3803            );
3804            match data {
3805                Ok(data) => Some(data),
3806                Err(e) if e.was_avoidable() || !desc.fallback => return Err(e.into()),
3807                // If the error was unavoidable and we were asked to fall back, do so
3808                Err(_) => None,
3809            }
3810        } else {
3811            None
3812        };
3813        let cache_desc = hal::PipelineCacheDescriptor {
3814            data,
3815            label: desc.label.to_hal(self.instance_flags),
3816        };
3817        let raw = match unsafe { self.raw().create_pipeline_cache(&cache_desc) } {
3818            Ok(raw) => raw,
3819            Err(e) => match e {
3820                hal::PipelineCacheError::Device(e) => return Err(self.handle_hal_error(e).into()),
3821            },
3822        };
3823        let cache = pipeline::PipelineCache {
3824            device: self.clone(),
3825            label: desc.label.to_string(),
3826            // This would be `None` in the error condition, which we don't implement yet
3827            raw: ManuallyDrop::new(raw),
3828        };
3829
3830        let cache = Arc::new(cache);
3831
3832        Ok(cache)
3833    }
3834
3835    fn get_texture_format_features(&self, format: TextureFormat) -> wgt::TextureFormatFeatures {
3836        // Variant of adapter.get_texture_format_features that takes device features into account
3837        use wgt::TextureFormatFeatureFlags as Tfff;
3838        let mut format_features = self.adapter.get_texture_format_features(format);
3839        if (format == TextureFormat::R32Float
3840            || format == TextureFormat::Rg32Float
3841            || format == TextureFormat::Rgba32Float)
3842            && !self.features.contains(wgt::Features::FLOAT32_FILTERABLE)
3843        {
3844            format_features.flags.set(Tfff::FILTERABLE, false);
3845        }
3846        format_features
3847    }
3848
3849    fn describe_format_features(
3850        &self,
3851        format: TextureFormat,
3852    ) -> Result<wgt::TextureFormatFeatures, MissingFeatures> {
3853        self.require_features(format.required_features())?;
3854
3855        let using_device_features = self
3856            .features
3857            .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
3858        // If we're running downlevel, we need to ask the backend what we can
3859        // use, as we can't rely on the WebGPU-guaranteed format support.
3860        let downlevel = !self
3861            .downlevel
3862            .flags
3863            .contains(wgt::DownlevelFlags::WEBGPU_TEXTURE_FORMAT_SUPPORT);
3864
3865        if using_device_features || downlevel {
3866            Ok(self.get_texture_format_features(format))
3867        } else {
3868            Ok(format.guaranteed_format_features(self.features))
3869        }
3870    }
3871
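    /// Waits until the given submission index has completed on the GPU, then
    /// triages any submissions that finished in the meantime.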
3872    #[cfg(feature = "replay")]
3873    pub(crate) fn wait_for_submit(
3874        &self,
3875        submission_index: crate::SubmissionIndex,
3876    ) -> Result<(), DeviceError> {
3877        let fence = self.fence.read();
3878        let last_done_index = unsafe { self.raw().get_fence_value(fence.as_ref()) }
3879            .map_err(|e| self.handle_hal_error(e))?;
3880        if last_done_index < submission_index {
3881            unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) }
3882                .map_err(|e| self.handle_hal_error(e))?;
3883            drop(fence);
3884            if let Some(queue) = self.get_queue() {
3885                let closures = queue.lock_life().triage_submissions(submission_index);
3886                assert!(
3887                    closures.is_empty(),
3888                    "wait_for_submit is not expected to work with closures"
3889                );
3890            }
3891        }
3892        Ok(())
3893    }
3894
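    /// Creates a query set, checking the query type against the enabled
    /// features and the count against `QUERY_SET_MAX_QUERIES`.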
3895    pub(crate) fn create_query_set(
3896        self: &Arc<Self>,
3897        desc: &resource::QuerySetDescriptor,
3898    ) -> Result<Arc<QuerySet>, resource::CreateQuerySetError> {
3899        use resource::CreateQuerySetError as Error;
3900
3901        self.check_is_valid()?;
3902
3903        match desc.ty {
3904            wgt::QueryType::Occlusion => {}
3905            wgt::QueryType::Timestamp => {
3906                self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
3907            }
3908            wgt::QueryType::PipelineStatistics(..) => {
3909                self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
3910            }
3911        }
3912
3913        if desc.count == 0 {
3914            return Err(Error::ZeroCount);
3915        }
3916
3917        if desc.count > wgt::QUERY_SET_MAX_QUERIES {
3918            return Err(Error::TooManyQueries {
3919                count: desc.count,
3920                maximum: wgt::QUERY_SET_MAX_QUERIES,
3921            });
3922        }
3923
3924        let hal_desc = desc.map_label(|label| label.to_hal(self.instance_flags));
3925
3926        let raw = unsafe { self.raw().create_query_set(&hal_desc) }
3927            .map_err(|e| self.handle_hal_error_with_nonfatal_oom(e))?;
3928
3929        let query_set = QuerySet {
3930            raw: ManuallyDrop::new(raw),
3931            device: self.clone(),
3932            label: desc.label.to_string(),
3933            tracking_data: TrackingData::new(self.tracker_indices.query_sets.clone()),
3934            desc: desc.map_label(|_| ()),
3935        };
3936
3937        let query_set = Arc::new(query_set);
3938
3939        Ok(query_set)
3940    }
3941
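    /// Marks the device as invalid and invokes any registered device-lost
    /// closure with the given `message`.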
3942    fn lose(&self, message: &str) {
3943        // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device.
3944
3945        // Mark the device explicitly as invalid. This is checked in various
3946        // places to prevent new work from being submitted.
3947        self.valid.store(false, Ordering::Release);
3948
3949        // 1) Resolve the GPUDevice device.lost promise.
3950        if let Some(device_lost_closure) = self.device_lost_closure.lock().take() {
3951            device_lost_closure(DeviceLostReason::Unknown, message.to_string());
3952        }
3953
3954        // 2) Complete any outstanding mapAsync() steps.
3955        // 3) Complete any outstanding onSubmittedWorkDone() steps.
3956
3957        // These parts are passively accomplished by setting valid to false,
3958        // since that will prevent any new work from being added to the queues.
3959        // Future calls to poll_devices will continue to check the work queues
3960        // until they are cleared, and then drop the device.
3961    }
3962
3963    fn release_gpu_resources(&self) {
3964        // This is called when the device is lost, which makes every associated
3965        // resource invalid and unusable. This is an opportunity to release all of
3966        // the underlying GPU resources, even though the objects remain visible to
3967        // the user agent. We purge this memory naturally when resources have been
3968        // moved into the appropriate buckets, so this function just needs to
3969        // initiate movement into those buckets, and it can do that by calling
3970        // "destroy" on all the resources we know about.
3971
3972        // During these iterations, we discard all errors. We don't care!
3973        let trackers = self.trackers.lock();
3974        for buffer in trackers.buffers.used_resources() {
3975            if let Some(buffer) = Weak::upgrade(buffer) {
3976                buffer.destroy();
3977            }
3978        }
3979        for texture in trackers.textures.used_resources() {
3980            if let Some(texture) = Weak::upgrade(texture) {
3981                texture.destroy();
3982            }
3983        }
3984    }
3985
3986    pub(crate) fn new_usage_scope(&self) -> UsageScope<'_> {
3987        UsageScope::new_pooled(&self.usage_scopes, &self.tracker_indices)
3988    }
3989
3990    pub fn get_hal_counters(&self) -> wgt::HalCounters {
3991        self.raw().get_internal_counters()
3992    }
3993
3994    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
3995        self.raw().generate_allocator_report()
3996    }
3997}
3998
3999crate::impl_resource_type!(Device);
4000crate::impl_labeled!(Device);
4001crate::impl_storage_item!(Device);