epaint/mutex.rs

//! Helper module that adds extra checks when the `deadlock_detection` feature is turned on.

// ----------------------------------------------------------------------------

#[cfg(not(feature = "deadlock_detection"))]
const DEADLOCK_DURATION: std::time::Duration = std::time::Duration::from_secs(30);

#[cfg(not(feature = "deadlock_detection"))]
mod mutex_impl {
    use super::DEADLOCK_DURATION;

    /// Provides interior mutability.
    ///
    /// This is a thin wrapper around [`parking_lot::Mutex`], except if
    /// the feature `deadlock_detection` is enabled, in which case
    /// extra checks are added to detect deadlocks.
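    ///
    /// A minimal usage sketch (not from the original source), assuming this
    /// module is exposed as `epaint::mutex`:
    ///
    /// ```
    /// # use epaint::mutex::Mutex;
    /// let counter = Mutex::new(0);
    /// *counter.lock() += 1; // the guard is dropped at the end of the statement
    /// assert_eq!(*counter.lock(), 1);
    /// ```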
    #[derive(Default)]
    pub struct Mutex<T>(parking_lot::Mutex<T>);

    /// The lock you get from [`Mutex`].
    pub use parking_lot::MutexGuard;

    impl<T> Mutex<T> {
        #[inline(always)]
        pub fn new(val: T) -> Self {
            Self(parking_lot::Mutex::new(val))
        }

        #[inline(always)]
        #[cfg_attr(debug_assertions, track_caller)]
        pub fn lock(&self) -> MutexGuard<'_, T> {
            if cfg!(debug_assertions) {
                self.0.try_lock_for(DEADLOCK_DURATION).unwrap_or_else(|| {
                    panic!(
                        "DEBUG PANIC: Failed to acquire Mutex after {}s. Deadlock?",
                        DEADLOCK_DURATION.as_secs()
                    )
                })
            } else {
                self.0.lock()
            }
        }
    }
}

#[cfg(feature = "deadlock_detection")]
mod mutex_impl {
    /// Provides interior mutability.
    ///
    /// This is a thin wrapper around [`parking_lot::Mutex`], except if
    /// the feature `deadlock_detection` is enabled, in which case
    /// extra checks are added to detect deadlocks.
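    ///
    /// With `deadlock_detection` enabled, recursively locking the same mutex on
    /// one thread panics instead of deadlocking. A sketch of that behavior
    /// (not from the original source), assuming the module is exposed as
    /// `epaint::mutex`:
    ///
    /// ```should_panic
    /// # use epaint::mutex::Mutex;
    /// let mutex = Mutex::new(());
    /// let _first = mutex.lock();
    /// let _second = mutex.lock(); // panics: recursive locking is not supported
    /// ```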
    #[derive(Default)]
    pub struct Mutex<T>(parking_lot::Mutex<T>);

    /// The lock you get from [`Mutex`].
    pub struct MutexGuard<'a, T>(parking_lot::MutexGuard<'a, T>, *const ());

    #[derive(Default)]
    struct HeldLocks(Vec<*const ()>);

    impl HeldLocks {
        #[inline(always)]
        fn insert(&mut self, lock: *const ()) {
            // Very few locks will ever be held at the same time, so a linear search is fast
            assert!(
                !self.0.contains(&lock),
                "Recursively locking a Mutex in the same thread is not supported"
            );
            self.0.push(lock);
        }

        #[inline(always)]
        fn remove(&mut self, lock: *const ()) {
            self.0.retain(|&ptr| ptr != lock);
        }
    }

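    // All mutexes currently locked by this thread, identified by pointers to their inner data.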
    thread_local! {
        static HELD_LOCKS_TLS: std::cell::RefCell<HeldLocks> = Default::default();
    }

    impl<T> Mutex<T> {
        #[inline(always)]
        pub fn new(val: T) -> Self {
            Self(parking_lot::Mutex::new(val))
        }

        pub fn lock(&self) -> MutexGuard<'_, T> {
            // Detect if we are recursively taking out a lock on this mutex.

            // use a pointer to the inner data as an id for this lock
            let ptr = std::ptr::from_ref::<parking_lot::Mutex<_>>(&self.0).cast::<()>();

            // Store it in thread local storage while we have a lock guard taken out
            HELD_LOCKS_TLS.with(|held_locks| {
                held_locks.borrow_mut().insert(ptr);
            });

            MutexGuard(self.0.lock(), ptr)
        }

        #[inline(always)]
        pub fn into_inner(self) -> T {
            self.0.into_inner()
        }
    }

    impl<T> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
            let ptr = self.1;
            HELD_LOCKS_TLS.with(|held_locks| {
                held_locks.borrow_mut().remove(ptr);
            });
        }
    }

    impl<T> std::ops::Deref for MutexGuard<'_, T> {
        type Target = T;

        #[inline(always)]
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl<T> std::ops::DerefMut for MutexGuard<'_, T> {
        #[inline(always)]
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }
}

// ----------------------------------------------------------------------------

#[cfg(not(feature = "deadlock_detection"))]
mod rw_lock_impl {
    use super::DEADLOCK_DURATION;

    /// The lock you get from [`RwLock::read`].
    pub use parking_lot::MappedRwLockReadGuard as RwLockReadGuard;

    /// The lock you get from [`RwLock::write`].
    pub use parking_lot::MappedRwLockWriteGuard as RwLockWriteGuard;

    /// Provides interior mutability.
    ///
    /// This is a thin wrapper around [`parking_lot::RwLock`], except if
    /// the feature `deadlock_detection` is enabled, in which case
    /// extra checks are added to detect deadlocks.
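    ///
    /// A minimal read/write sketch (not from the original source), assuming
    /// this module is exposed as `epaint::mutex`:
    ///
    /// ```
    /// # use epaint::mutex::RwLock;
    /// let cache = RwLock::new(String::new());
    /// cache.write().push_str("hello");
    /// assert_eq!(cache.read().as_str(), "hello");
    /// ```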
    #[derive(Default)]
    pub struct RwLock<T: ?Sized>(parking_lot::RwLock<T>);

    impl<T> RwLock<T> {
        #[inline(always)]
        pub fn new(val: T) -> Self {
            Self(parking_lot::RwLock::new(val))
        }
    }

    impl<T: ?Sized> RwLock<T> {
        #[inline(always)]
        #[cfg_attr(debug_assertions, track_caller)]
        pub fn read(&self) -> RwLockReadGuard<'_, T> {
            let guard = if cfg!(debug_assertions) {
                self.0.try_read_for(DEADLOCK_DURATION).unwrap_or_else(|| {
                    panic!(
                        "DEBUG PANIC: Failed to acquire RwLock read after {}s. Deadlock?",
                        DEADLOCK_DURATION.as_secs()
                    )
                })
            } else {
                self.0.read()
            };
            parking_lot::RwLockReadGuard::map(guard, |v| v)
        }

        #[inline(always)]
        #[cfg_attr(debug_assertions, track_caller)]
        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
            let guard = if cfg!(debug_assertions) {
                self.0.try_write_for(DEADLOCK_DURATION).unwrap_or_else(|| {
                    panic!(
                        "DEBUG PANIC: Failed to acquire RwLock write after {}s. Deadlock?",
                        DEADLOCK_DURATION.as_secs()
                    )
                })
            } else {
                self.0.write()
            };
            parking_lot::RwLockWriteGuard::map(guard, |v| v)
        }
    }
}

#[cfg(feature = "deadlock_detection")]
mod rw_lock_impl {
    use std::{
        ops::{Deref, DerefMut},
        sync::Arc,
        thread::ThreadId,
    };

    use ahash::HashMap;
    use parking_lot::{MappedRwLockReadGuard, MappedRwLockWriteGuard};

    /// The lock you get from [`RwLock::read`].
    pub struct RwLockReadGuard<'a, T> {
        // The option is used only because we need to `take()` the guard out of self
        // when doing remappings (`map()`), i.e. it's used as a safe `ManuallyDrop`.
        guard: Option<MappedRwLockReadGuard<'a, T>>,
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<'a, T> RwLockReadGuard<'a, T> {
        #[inline]
        pub fn map<U, F>(mut s: Self, f: F) -> RwLockReadGuard<'a, U>
        where
            F: FnOnce(&T) -> &U,
        {
            RwLockReadGuard {
                guard: s
                    .guard
                    .take()
                    .map(|g| parking_lot::MappedRwLockReadGuard::map(g, f)),
                holders: Arc::clone(&s.holders),
            }
        }
    }

    impl<T> Deref for RwLockReadGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            self.guard.as_ref().unwrap()
        }
    }

    impl<T> Drop for RwLockReadGuard<'_, T> {
        fn drop(&mut self) {
            let tid = std::thread::current().id();
            self.holders.lock().remove(&tid);
        }
    }

    /// The lock you get from [`RwLock::write`].
    pub struct RwLockWriteGuard<'a, T> {
        // The option is used only because we need to `take()` the guard out of self
        // when doing remappings (`map()`), i.e. it's used as a safe `ManuallyDrop`.
        guard: Option<MappedRwLockWriteGuard<'a, T>>,
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<'a, T> RwLockWriteGuard<'a, T> {
        #[inline]
        pub fn map<U, F>(mut s: Self, f: F) -> RwLockWriteGuard<'a, U>
        where
            F: FnOnce(&mut T) -> &mut U,
        {
            RwLockWriteGuard {
                guard: s
                    .guard
                    .take()
                    .map(|g| parking_lot::MappedRwLockWriteGuard::map(g, f)),
                holders: Arc::clone(&s.holders),
            }
        }
    }

    impl<T> Deref for RwLockWriteGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            self.guard.as_ref().unwrap()
        }
    }

    impl<T> DerefMut for RwLockWriteGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            self.guard.as_mut().unwrap()
        }
    }

    impl<T> Drop for RwLockWriteGuard<'_, T> {
        fn drop(&mut self) {
            let tid = std::thread::current().id();
            self.holders.lock().remove(&tid);
        }
    }

    /// Provides interior mutability.
    ///
    /// This is a thin wrapper around [`parking_lot::RwLock`], except if
    /// the feature `deadlock_detection` is enabled, in which case
    /// extra checks are added to detect deadlocks.
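    ///
    /// Same-thread read-reentrancy is still allowed, since parking_lot's
    /// read-locks can be taken multiple times by one thread when no writer is
    /// involved. A sketch (not from the original source), assuming the module
    /// is exposed as `epaint::mutex`:
    ///
    /// ```
    /// # use epaint::mutex::RwLock;
    /// let lock = RwLock::new(1);
    /// let a = lock.read();
    /// let b = lock.read(); // fine: two read-locks on the same thread
    /// assert_eq!(*a + *b, 2);
    /// ```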
    #[derive(Default)]
    pub struct RwLock<T> {
        lock: parking_lot::RwLock<T>,
        // Technically we'd need a list of backtraces per thread-id since parking_lot's
        // read-locks are reentrant.
        // In practice it's not that useful to have the whole list though, so we only
        // keep track of the first backtrace for now.
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<T> RwLock<T> {
        pub fn new(val: T) -> Self {
            Self {
                lock: parking_lot::RwLock::new(val),
                holders: Default::default(),
            }
        }

        pub fn read(&self) -> RwLockReadGuard<'_, T> {
            let tid = std::thread::current().id();

            // If it is write-locked, and we locked it (reentrancy deadlock)
            let would_deadlock =
                self.lock.is_locked_exclusive() && self.holders.lock().contains_key(&tid);
            assert!(
                !would_deadlock,
                "{} DEAD-LOCK DETECTED ({:?})!\n\
                    Trying to grab read-lock at:\n{}\n\
                    which is already exclusively held by current thread at:\n{}\n\n",
                std::any::type_name::<Self>(),
                tid,
                format_backtrace(&mut make_backtrace()),
                format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
            );

            self.holders
                .lock()
                .entry(tid)
                .or_insert_with(make_backtrace);

            RwLockReadGuard {
                guard: parking_lot::RwLockReadGuard::map(self.lock.read(), |v| v).into(),
                holders: Arc::clone(&self.holders),
            }
        }

        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
            let tid = std::thread::current().id();

            // If it is locked in any way, and we locked it (reentrancy deadlock)
            let would_deadlock = self.lock.is_locked() && self.holders.lock().contains_key(&tid);
            assert!(
                !would_deadlock,
                "{} DEAD-LOCK DETECTED ({:?})!\n\
                    Trying to grab write-lock at:\n{}\n\
                    which is already held by current thread at:\n{}\n\n",
                std::any::type_name::<Self>(),
                tid,
                format_backtrace(&mut make_backtrace()),
                format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
            );

            self.holders
                .lock()
                .entry(tid)
                .or_insert_with(make_backtrace);

            RwLockWriteGuard {
                guard: parking_lot::RwLockWriteGuard::map(self.lock.write(), |v| v).into(),
                holders: Arc::clone(&self.holders),
            }
        }

        #[inline(always)]
        pub fn into_inner(self) -> T {
            self.lock.into_inner()
        }
    }

    fn make_backtrace() -> backtrace::Backtrace {
        backtrace::Backtrace::new_unresolved()
    }

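    // Resolves the lazily captured backtrace and trims the frames outside the
    // interesting region (between `make_backtrace` and Rust's own backtrace boilerplate).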
    fn format_backtrace(backtrace: &mut backtrace::Backtrace) -> String {
        backtrace.resolve();

        let stacktrace = format!("{backtrace:?}");

        // Remove irrelevant parts of the stacktrace:
        let end_offset = stacktrace
            .find("std::sys_common::backtrace::__rust_begin_short_backtrace")
            .unwrap_or(stacktrace.len());
        let stacktrace = &stacktrace[..end_offset];

        let first_interesting_function = "epaint::mutex::rw_lock_impl::make_backtrace\n";
        if let Some(start_offset) = stacktrace.find(first_interesting_function) {
            stacktrace[start_offset + first_interesting_function.len()..].to_owned()
        } else {
            stacktrace.to_owned()
        }
    }
}

// ----------------------------------------------------------------------------

pub use mutex_impl::{Mutex, MutexGuard};
pub use rw_lock_impl::{RwLock, RwLockReadGuard, RwLockWriteGuard};

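// Cloning a `Mutex` takes the lock and clones the inner value into a new, unlocked `Mutex`.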
impl<T> Clone for Mutex<T>
where
    T: Clone,
{
    fn clone(&self) -> Self {
        Self::new(self.lock().clone())
    }
}

// ----------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    #![allow(clippy::disallowed_methods)] // Ok for tests

    use crate::mutex::Mutex;
    use std::time::Duration;

    #[test]
    fn lock_two_different_mutexes_single_thread() {
        let one = Mutex::new(());
        let two = Mutex::new(());
        let _a = one.lock();
        let _b = two.lock();
    }

    #[test]
    fn lock_multiple_threads() {
        use std::sync::Arc;
        let one = Arc::new(Mutex::new(()));
        let our_lock = one.lock();
        let other_thread = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                let _lock = one.lock();
            })
        };
        std::thread::sleep(Duration::from_millis(200));
        drop(our_lock);
        other_thread.join().unwrap();
    }
}

#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "deadlock_detection")]
#[cfg(test)]
mod tests_rwlock {
    #![allow(clippy::disallowed_methods)] // Ok for tests

    use crate::mutex::RwLock;
    use std::time::Duration;

    #[test]
    fn lock_two_different_rwlocks_single_thread() {
        let one = RwLock::new(());
        let two = RwLock::new(());
        let _a = one.write();
        let _b = two.write();
    }

    #[test]
    fn rwlock_multiple_threads() {
        use std::sync::Arc;
        let one = Arc::new(RwLock::new(()));
        let our_lock = one.write();
        let other_thread1 = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                let _ = one.write();
            })
        };
        let other_thread2 = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                let _ = one.read();
            })
        };
        std::thread::sleep(Duration::from_millis(200));
        drop(our_lock);
        other_thread1.join().unwrap();
        other_thread2.join().unwrap();
    }

    #[test]
    #[should_panic]
    fn rwlock_write_write_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.write();
        let _a2 = one.write(); // panics
    }

    #[test]
    #[should_panic]
    fn rwlock_write_read_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.write();
        let _a2 = one.read(); // panics
    }

    #[test]
    #[should_panic]
    fn rwlock_read_write_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.read();
        let _a2 = one.write(); // panics
    }

    #[test]
    fn rwlock_read_read_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.read();
        // This is legal: this test suite specifically targets native, where
        // parking_lot's rw-locks allow reentrant reads.
        let _a2 = one.read();
    }

    #[test]
    fn rwlock_short_read_foreign_read_write_reentrancy() {
        use std::sync::Arc;

        let lock = Arc::new(RwLock::new(()));

        // Thread #0 grabs a read lock
        let t0r0 = lock.read();

        // Thread #1 grabs the same read lock
        let other_thread = {
            let lock = Arc::clone(&lock);
            std::thread::spawn(move || {
                let _t1r0 = lock.read();
            })
        };
        other_thread.join().unwrap();

        // Thread #0 releases its read lock
        drop(t0r0);

        // Thread #0 now grabs a write lock, which is legal
        let _t0w0 = lock.write();
    }

    #[test]
    #[should_panic]
    fn rwlock_read_foreign_read_write_reentrancy() {
        use std::sync::Arc;

        let lock = Arc::new(RwLock::new(()));

        // Thread #0 grabs a read lock
        let _t0r0 = lock.read();

        // Thread #1 grabs the same read lock
        let other_thread = {
            let lock = Arc::clone(&lock);
            std::thread::spawn(move || {
                let _t1r0 = lock.read();
            })
        };
        other_thread.join().unwrap();

        // Thread #0 now grabs a write lock, which should panic (read-write)
        let _t0w0 = lock.write(); // panics
    }
}