wgpu_core/snatch.rs

use core::{cell::UnsafeCell, fmt};

use crate::lock::{rank, RwLock, RwLockReadGuard, RwLockWriteGuard};

/// A guard that provides read access to snatchable data.
pub struct SnatchGuard<'a>(#[expect(dead_code)] RwLockReadGuard<'a, ()>);
/// A guard that allows snatching the snatchable data.
pub struct ExclusiveSnatchGuard<'a>(#[expect(dead_code)] RwLockWriteGuard<'a, ()>);

/// A value that is mostly immutable but can be "snatched" if we need to destroy
/// it early.
///
/// In order to safely access the underlying data, the device's global snatchable
/// lock must be taken. To guarantee it, methods take a read or write guard of that
/// special lock.
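///
/// A minimal usage sketch; `rank::DEVICE_SNATCHABLE_LOCK` is an assumed rank
/// constant used here only for illustration:
///
/// ```ignore
/// let value = Snatchable::new(42u32);
/// // In practice the lock is created once, when the device is created.
/// let lock = unsafe { SnatchLock::new(rank::DEVICE_SNATCHABLE_LOCK) };
///
/// // Shared reads require the device-wide read guard.
/// let guard = lock.read();
/// assert_eq!(value.get(&guard), Some(&42));
/// ```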
pub struct Snatchable<T> {
    value: UnsafeCell<Option<T>>,
}

impl<T> Snatchable<T> {
    pub fn new(val: T) -> Self {
        Snatchable {
            value: UnsafeCell::new(Some(val)),
        }
    }

    #[allow(dead_code)]
    pub fn empty() -> Self {
        Snatchable {
            value: UnsafeCell::new(None),
        }
    }

    /// Get read access to the value. Requires the snatchable lock's read guard.
    pub fn get<'a>(&'a self, _guard: &'a SnatchGuard) -> Option<&'a T> {
        unsafe { (*self.value.get()).as_ref() }
    }

    /// Take the value. Requires the snatchable lock's write guard.
    pub fn snatch(&self, _guard: &mut ExclusiveSnatchGuard) -> Option<T> {
        unsafe { (*self.value.get()).take() }
    }

    /// Take the value without a guard. This can only be used with exclusive access
    /// to self, so it does not require locking.
    ///
    /// Typically useful in a drop implementation.
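    ///
    /// A minimal sketch, assuming a wrapper type that owns a `Snatchable`
    /// (`MyResource` and its `inner` field are illustrative only):
    ///
    /// ```ignore
    /// impl Drop for MyResource {
    ///     fn drop(&mut self) {
    ///         // Exclusive access through `&mut self`; no snatch lock needed.
    ///         if let Some(raw) = self.inner.take() {
    ///             // free `raw` here
    ///         }
    ///     }
    /// }
    /// ```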
    pub fn take(&mut self) -> Option<T> {
        self.value.get_mut().take()
    }
}

// Can't safely print the contents of a snatchable object without holding
// the lock.
impl<T> fmt::Debug for Snatchable<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "<snatchable>")
    }
}

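// SAFETY: access to the inner `UnsafeCell` is expected to be synchronized
// externally: shared reads go through `get`, which takes a `SnatchGuard`,
// snatching takes an `ExclusiveSnatchGuard`, and `take` requires `&mut self`.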
unsafe impl<T> Sync for Snatchable<T> {}

use trace::LockTrace;
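/// Debug-only (std) tracking of snatch lock acquisitions: a thread-local
/// records the location and backtrace of the currently held snatch guard, so
/// that attempting to acquire the lock again on the same thread panics with
/// both call sites rather than risking a deadlock.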
#[cfg(all(debug_assertions, feature = "std"))]
mod trace {
    use core::{cell::Cell, fmt, panic::Location};
    use std::{backtrace::Backtrace, thread};

    pub(super) struct LockTrace {
        purpose: &'static str,
        caller: &'static Location<'static>,
        backtrace: Backtrace,
    }

    impl fmt::Display for LockTrace {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                "a {} lock at {}\n{}",
                self.purpose, self.caller, self.backtrace
            )
        }
    }

    impl LockTrace {
        #[track_caller]
        pub(super) fn enter(purpose: &'static str) {
            let new = LockTrace {
                purpose,
                caller: Location::caller(),
                backtrace: Backtrace::capture(),
            };

            if let Some(prev) = SNATCH_LOCK_TRACE.take() {
                let current = thread::current();
                let name = current.name().unwrap_or("<unnamed>");
                panic!(
                    "thread '{name}' attempted to acquire a snatch lock recursively.\n\
                 - Currently trying to acquire {new}\n\
                 - Previously acquired {prev}",
                );
            } else {
                SNATCH_LOCK_TRACE.set(Some(new));
            }
        }

        pub(super) fn exit() {
            SNATCH_LOCK_TRACE.take();
        }
    }

    std::thread_local! {
        static SNATCH_LOCK_TRACE: Cell<Option<LockTrace>> = const { Cell::new(None) };
    }
}
#[cfg(not(all(debug_assertions, feature = "std")))]
mod trace {
    pub(super) struct LockTrace {
        _private: (),
    }

    impl LockTrace {
        pub(super) fn enter(_purpose: &'static str) {}
        pub(super) fn exit() {}
    }
}

/// A Device-global lock for all snatchable data.
pub struct SnatchLock {
    lock: RwLock<()>,
}

impl SnatchLock {
    /// The safety of `Snatchable::get` and `Snatchable::snatch` relies on using the
    /// right `SnatchLock` (the one associated with the same device). This method is
    /// unsafe to force users to think twice about creating a `SnatchLock`. The only
    /// place this method should be called is when creating the device.
    pub unsafe fn new(rank: rank::LockRank) -> Self {
        SnatchLock {
            lock: RwLock::new(rank, ()),
        }
    }

    /// Request read access to snatchable resources.
    #[track_caller]
    pub fn read(&self) -> SnatchGuard {
        LockTrace::enter("read");
        SnatchGuard(self.lock.read())
    }

    /// Request write access to snatchable resources.
    ///
    /// This should only be called when a resource needs to be snatched. This has
    /// a high risk of causing lock contention if called concurrently with other
    /// wgpu work.
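    ///
    /// A minimal sketch of the snatch flow (`device_snatchable_lock` and
    /// `resource.inner` are illustrative names, not items defined in this module):
    ///
    /// ```ignore
    /// let mut guard = device_snatchable_lock.write();
    /// if let Some(raw) = resource.inner.snatch(&mut guard) {
    ///     // Destroy `raw`; subsequent `get` calls will observe `None`.
    /// }
    /// drop(guard); // release the exclusive lock as soon as possible
    /// ```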
    #[track_caller]
    pub fn write(&self) -> ExclusiveSnatchGuard {
        LockTrace::enter("write");
        ExclusiveSnatchGuard(self.lock.write())
    }
}

impl Drop for SnatchGuard<'_> {
    fn drop(&mut self) {
        LockTrace::exit();
    }
}

impl Drop for ExclusiveSnatchGuard<'_> {
    fn drop(&mut self) {
        LockTrace::exit();
    }
}