wgpu_core/lock/ranked.rs
1//! Lock types that enforce well-ranked lock acquisition order.
2//!
//! This module's [`Mutex`] and [`RwLock`] types are instrumented to check that
4//! `wgpu-core` acquires locks according to their rank, to prevent deadlocks. To
5//! use it, put `--cfg wgpu_validate_locks` in `RUSTFLAGS`.
6//!
7//! The [`LockRank`] constants in the [`lock::rank`] module describe edges in a
8//! directed graph of lock acquisitions: each lock's rank says, if this is the most
9//! recently acquired lock that you are still holding, then these are the locks you
10//! are allowed to acquire next.
11//!
12//! As long as this graph doesn't have cycles, any number of threads can acquire
13//! locks along paths through the graph without deadlock:
14//!
15//! - Assume that if a thread is holding a lock, then it will either release it,
16//! or block trying to acquire another one. No thread just sits on its locks
17//! forever for unrelated reasons. If it did, then that would be a source of
18//! deadlock "outside the system" that we can't do anything about.
19//!
20//! - This module asserts that threads acquire and release locks in a stack-like
21//! order: a lock is dropped only when it is the *most recently acquired* lock
22//! *still held* - call this the "youngest" lock. This stack-like ordering
23//! isn't a Rust requirement; Rust lets you drop guards in any order you like.
24//! This is a restriction we impose.
25//!
26//! - Consider the directed graph whose nodes are locks, and whose edges go from
27//! each lock to its permitted followers, the locks in its [`LockRank::followers`]
28//! set. The definition of the [`lock::rank`] module's [`LockRank`] constants
29//! ensures that this graph has no cycles, including trivial cycles from a node to
30//! itself.
31//!
32//! - This module then asserts that each thread attempts to acquire a lock only if
33//! it is among its youngest lock's permitted followers. Thus, as a thread
34//! acquires locks, it must be traversing a path through the graph along its
35//! edges.
36//!
37//! - Because there are no cycles in the graph, whenever one thread is blocked
38//! waiting to acquire a lock, that lock must be held by a different thread: if
39//! you were allowed to acquire a lock you already hold, that would be a cycle in
40//! the graph.
41//!
42//! - Furthermore, because the graph has no cycles, as we work our way from each
43//! thread to the thread it is blocked waiting for, we must eventually reach an
44//! end point: there must be some thread that is able to acquire its next lock, or
45//! that is about to release a lock.
46//!
47//! Thus, the system as a whole is always able to make progress: it is free of
48//! deadlocks.
49//!
50//! Note that this validation only monitors each thread's behavior in isolation:
51//! there's only thread-local state, nothing communicated between threads. So we
52//! don't detect deadlocks, per se, only the potential to cause deadlocks. This
53//! means that the validation is conservative, but more reproducible, since it's not
54//! dependent on any particular interleaving of execution.
55//!
56//! [`lock::rank`]: crate::lock::rank
57
58use core::{cell::Cell, fmt, ops, panic::Location};
59
60use super::rank::LockRank;
61
62/// A `Mutex` instrumented for deadlock prevention.
63///
64/// This is just a wrapper around a [`parking_lot::Mutex`], along with
65/// its rank in the `wgpu_core` lock ordering.
66///
67/// For details, see [the module documentation][self].
pub struct Mutex<T> {
    /// The underlying lock that actually protects `T`.
    inner: parking_lot::Mutex<T>,
    /// This lock's rank in the `wgpu_core` lock ordering graph.
    rank: LockRank,
}
72
73/// A guard produced by locking [`Mutex`].
74///
75/// This is just a wrapper around a [`parking_lot::MutexGuard`], along
76/// with the state needed to track lock acquisition.
77///
78/// For details, see [the module documentation][self].
pub struct MutexGuard<'a, T> {
    /// The underlying guard; dropping it releases the lock.
    inner: parking_lot::MutexGuard<'a, T>,
    /// Restores this thread's pre-acquisition [`LockState`] when dropped.
    saved: LockStateGuard,
}
83
std::thread_local! {
    /// The current thread's lock validation state.
    ///
    /// This is thread-local because the validation only monitors each
    /// thread's own acquisitions; nothing is shared between threads.
    static LOCK_STATE: Cell<LockState> = const { Cell::new(LockState::INITIAL) };
}
87
/// Per-thread state for the deadlock checker.
#[derive(Debug, Copy, Clone)]
struct LockState {
    /// The last lock we acquired, and where.
    ///
    /// `None` when this thread holds no locks.
    last_acquired: Option<(LockRank, &'static Location<'static>)>,

    /// The number of locks currently held.
    ///
    /// This is used to enforce stack-like lock acquisition and release.
    depth: u32,
}
99
impl LockState {
    /// The state of a thread that holds no locks at all.
    const INITIAL: LockState = LockState {
        last_acquired: None,
        depth: 0,
    };
}
106
/// A container that restores a [`LockState`] when dropped.
///
/// This type serves two purposes:
///
/// - Operations like `RwLockWriteGuard::downgrade` would like to be able to
///   destructure lock guards and reassemble their pieces into new guards, but
///   if the guard type itself implements `Drop`, we can't destructure it
///   without unsafe code or pointless `Option`s whose state is almost always
///   statically known.
///
/// - We can just implement `Drop` for this type once, and then use it in lock
///   guards, rather than implementing `Drop` separately for each guard type.
struct LockStateGuard(LockState); // holds the state to restore on drop
120
121impl Drop for LockStateGuard {
122 fn drop(&mut self) {
123 release(self.0)
124 }
125}
126
127/// Check and record the acquisition of a lock with `new_rank`.
128///
129/// Check that acquiring a lock with `new_rank` is permitted at this point, and
130/// update the per-thread state accordingly.
131///
132/// Return the `LockState` that must be restored when this thread is released.
133fn acquire(new_rank: LockRank, location: &'static Location<'static>) -> LockState {
134 let state = LOCK_STATE.get();
135 // Initially, it's fine to acquire any lock. So we only
136 // need to check when `last_acquired` is `Some`.
137 if let Some((ref last_rank, ref last_location)) = state.last_acquired {
138 assert!(
139 last_rank.followers.contains(new_rank.bit),
140 "Attempt to acquire nested mutexes in wrong order:\n\
141 last locked {:<35} at {}\n\
142 now locking {:<35} at {}\n\
143 Locking {} after locking {} is not permitted.",
144 last_rank.bit.member_name(),
145 last_location,
146 new_rank.bit.member_name(),
147 location,
148 new_rank.bit.member_name(),
149 last_rank.bit.member_name(),
150 );
151 }
152 LOCK_STATE.set(LockState {
153 last_acquired: Some((new_rank, location)),
154 depth: state.depth + 1,
155 });
156 state
157}
158
159/// Record the release of a lock whose saved state was `saved`.
160///
161/// Check that locks are being acquired in stacking order, and update the
162/// per-thread state accordingly.
163fn release(saved: LockState) {
164 let prior = LOCK_STATE.replace(saved);
165
166 // Although Rust allows mutex guards to be dropped in any
167 // order, this analysis requires that locks be acquired and
168 // released in stack order: the next lock to be released must be
169 // the most recently acquired lock still held.
170 assert_eq!(
171 prior.depth,
172 saved.depth + 1,
173 "Lock not released in stacking order"
174 );
175}
176
177impl<T> Mutex<T> {
178 pub fn new(rank: LockRank, value: T) -> Mutex<T> {
179 Mutex {
180 inner: parking_lot::Mutex::new(value),
181 rank,
182 }
183 }
184
185 #[track_caller]
186 pub fn lock(&self) -> MutexGuard<T> {
187 let saved = acquire(self.rank, Location::caller());
188 MutexGuard {
189 inner: self.inner.lock(),
190 saved: LockStateGuard(saved),
191 }
192 }
193}
194
195impl<'a, T> ops::Deref for MutexGuard<'a, T> {
196 type Target = T;
197
198 fn deref(&self) -> &Self::Target {
199 self.inner.deref()
200 }
201}
202
203impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
204 fn deref_mut(&mut self) -> &mut Self::Target {
205 self.inner.deref_mut()
206 }
207}
208
209impl<T: fmt::Debug> fmt::Debug for Mutex<T> {
210 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
211 self.inner.fmt(f)
212 }
213}
214
215/// An `RwLock` instrumented for deadlock prevention.
216///
217/// This is just a wrapper around a [`parking_lot::RwLock`], along with
218/// its rank in the `wgpu_core` lock ordering.
219///
220/// For details, see [the module documentation][self].
pub struct RwLock<T> {
    /// The underlying reader-writer lock that actually protects `T`.
    inner: parking_lot::RwLock<T>,
    /// This lock's rank in the `wgpu_core` lock ordering graph.
    rank: LockRank,
}
225
226/// A read guard produced by locking [`RwLock`] for reading.
227///
228/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with
229/// the state needed to track lock acquisition.
230///
231/// For details, see [the module documentation][self].
pub struct RwLockReadGuard<'a, T> {
    /// The underlying read guard; dropping it releases the lock.
    inner: parking_lot::RwLockReadGuard<'a, T>,
    /// Restores this thread's pre-acquisition [`LockState`] when dropped.
    saved: LockStateGuard,
}
236
237/// A write guard produced by locking [`RwLock`] for writing.
238///
239/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along
240/// with the state needed to track lock acquisition.
241///
242/// For details, see [the module documentation][self].
pub struct RwLockWriteGuard<'a, T> {
    /// The underlying write guard; dropping it releases the lock.
    inner: parking_lot::RwLockWriteGuard<'a, T>,
    /// Restores this thread's pre-acquisition [`LockState`] when dropped.
    saved: LockStateGuard,
}
247
248impl<T> RwLock<T> {
249 pub fn new(rank: LockRank, value: T) -> RwLock<T> {
250 RwLock {
251 inner: parking_lot::RwLock::new(value),
252 rank,
253 }
254 }
255
256 #[track_caller]
257 pub fn read(&self) -> RwLockReadGuard<T> {
258 let saved = acquire(self.rank, Location::caller());
259 RwLockReadGuard {
260 inner: self.inner.read(),
261 saved: LockStateGuard(saved),
262 }
263 }
264
265 #[track_caller]
266 pub fn write(&self) -> RwLockWriteGuard<T> {
267 let saved = acquire(self.rank, Location::caller());
268 RwLockWriteGuard {
269 inner: self.inner.write(),
270 saved: LockStateGuard(saved),
271 }
272 }
273}
274
275impl<'a, T> RwLockWriteGuard<'a, T> {
276 pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> {
277 RwLockReadGuard {
278 inner: parking_lot::RwLockWriteGuard::downgrade(this.inner),
279 saved: this.saved,
280 }
281 }
282}
283
284impl<T: fmt::Debug> fmt::Debug for RwLock<T> {
285 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
286 self.inner.fmt(f)
287 }
288}
289
290impl<'a, T> ops::Deref for RwLockReadGuard<'a, T> {
291 type Target = T;
292
293 fn deref(&self) -> &Self::Target {
294 self.inner.deref()
295 }
296}
297
298impl<'a, T> ops::Deref for RwLockWriteGuard<'a, T> {
299 type Target = T;
300
301 fn deref(&self) -> &Self::Target {
302 self.inner.deref()
303 }
304}
305
306impl<'a, T> ops::DerefMut for RwLockWriteGuard<'a, T> {
307 fn deref_mut(&mut self) -> &mut Self::Target {
308 self.inner.deref_mut()
309 }
310}
311
/// Locks can be acquired in the order indicated by their ranks.
#[test]
fn permitted() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());

    // PAWN permits ROOK as a follower, so this nesting is allowed.
    let _pawn_guard = pawn.lock();
    let _rook_guard = rook.lock();
}
323
/// Locks can only be acquired in the order indicated by their ranks.
#[test]
#[should_panic(expected = "Locking pawn after locking rook")]
fn forbidden_unrelated() {
    use super::rank;

    let rook = Mutex::new(rank::ROOK, ());
    let pawn = Mutex::new(rank::PAWN, ());

    // Acquiring PAWN while ROOK is held violates the rank order.
    let _rook_guard = rook.lock();
    let _pawn_guard = pawn.lock();
}
336
/// Lock acquisitions can't skip ranks.
///
/// These two locks *could* be acquired in this order, but only if other locks
/// are acquired in between them. Skipping ranks isn't allowed.
#[test]
#[should_panic(expected = "Locking knight after locking pawn")]
fn forbidden_skip() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let knight = Mutex::new(rank::KNIGHT, ());

    // KNIGHT is not a direct follower of PAWN, so this must panic.
    let _pawn_guard = pawn.lock();
    let _knight_guard = knight.lock();
}
352
/// Locks can be acquired and released in a stack-like order.
#[test]
fn stack_like() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());
    let bishop = Mutex::new(rank::BISHOP, ());

    // Properly nested acquisitions and releases: each inner lock is
    // released before anything acquired earlier.
    let pawn_guard = pawn.lock();

    let rook_guard = rook.lock();
    drop(rook_guard);

    let bishop_guard = bishop.lock();
    drop(bishop_guard);

    drop(pawn_guard);
}
370
/// Locks can only be acquired and released in a stack-like order.
#[test]
#[should_panic(expected = "Lock not released in stacking order")]
fn non_stack_like() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());

    let pawn_guard = pawn.lock();
    let rook_guard = rook.lock();

    // Leak the inner guard so that dropping it during unwinding doesn't
    // cause a double panic on top of the one we're testing for.
    core::mem::forget(rook_guard);

    // Releasing `pawn_guard` while `rook_guard` is still (logically) held
    // violates the stacking discipline.
    drop(pawn_guard);
}