// style/rule_tree/core.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
5#![allow(unsafe_code)]
6
7use crate::applicable_declarations::CascadePriority;
8use crate::shared_lock::StylesheetGuards;
9use crate::stylesheets::layer_rule::LayerOrder;
10use malloc_size_of::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps};
11use parking_lot::RwLock;
12use smallvec::SmallVec;
13use std::fmt;
14use std::hash;
15use std::io::Write;
16use std::mem;
17use std::ptr;
18use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
19
20use super::map::{Entry, Map};
21use super::unsafe_box::UnsafeBox;
22use super::{CascadeLevel, CascadeOrigin, RuleCascadeFlags, StyleSource};
23
24/// The rule tree, the structure servo uses to preserve the results of selector
25/// matching.
26///
27/// This is organized as a tree of rules. When a node matches a set of rules,
28/// they're inserted in order in the tree, starting with the less specific one.
29///
30/// When a rule is inserted in the tree, other elements may share the path up to
31/// a given rule. If that's the case, we don't duplicate child nodes, but share
32/// them.
33///
34/// When the rule node refcount drops to zero, it doesn't get freed. It gets
35/// instead put into a free list, and it is potentially GC'd after a while.
36///
37/// That way, a rule node that represents a likely-to-match-again rule (like a
38/// :hover rule) can be reused if we haven't GC'd it yet.
#[derive(Debug)]
pub struct RuleTree {
    /// The root node of the tree. The root also owns the head of the free
    /// list and the approximate free count (see `RuleNode::next_free` and
    /// `RuleNode::approximate_free_count`).
    root: StrongRuleNode,
}
43
impl Drop for RuleTree {
    fn drop(&mut self) {
        // Swapping a null pointer into the root's `next_free` marks the free
        // list as gone: `RuleNode::push_on_free_list` will then fail and
        // nodes get dropped directly instead of being cached.
        unsafe { self.swap_free_list_and_gc(ptr::null_mut()) }
    }
}
49
50impl MallocSizeOf for RuleTree {
51    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
52        let mut n = 0;
53        let mut stack = SmallVec::<[_; 32]>::new();
54        stack.push(self.root.clone());
55
56        while let Some(node) = stack.pop() {
57            n += unsafe { ops.malloc_size_of(&*node.p) };
58            let children = node.p.children.read();
59            children.shallow_size_of(ops);
60            for c in &*children {
61                stack.push(unsafe { c.upgrade() });
62            }
63        }
64
65        n
66    }
67}
68
/// Uniquely identifies a child in its parent's children map: the cascade
/// priority paired with the type-erased pointer key of its `StyleSource`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct ChildKey(CascadePriority, ptr::NonNull<()>);
// SAFETY: the `NonNull<()>` is used only as an opaque identity key (through
// the derived `Eq`/`Hash`); it is never dereferenced through this type.
unsafe impl Send for ChildKey {}
unsafe impl Sync for ChildKey {}
73
impl RuleTree {
    /// Construct a new rule tree.
    pub fn new() -> Self {
        RuleTree {
            root: StrongRuleNode::new(Box::new(RuleNode::root())),
        }
    }

    /// Get the root rule node.
    pub fn root(&self) -> &StrongRuleNode {
        &self.root
    }

    /// Forces a GC of the free list, dropping every node on it.
    ///
    /// This can only be called when no other thread is accessing this tree.
    pub fn gc(&self) {
        // `DANGLING_PTR` (rather than null) leaves the free list alive but
        // empty, so it can keep being used afterwards.
        unsafe { self.swap_free_list_and_gc(RuleNode::DANGLING_PTR) }
    }

    /// GCs the free list if it has grown past `RULE_TREE_GC_INTERVAL`.
    ///
    /// This can only be called when no other thread is accessing this tree.
    pub fn maybe_gc(&self) {
        #[cfg(debug_assertions)]
        self.maybe_dump_stats();

        if self.root.p.approximate_free_count.load(Ordering::Relaxed) > RULE_TREE_GC_INTERVAL {
            self.gc();
        }
    }

    /// Trace-logs a histogram of children counts, at most once every two
    /// seconds per thread. Debug builds only.
    #[cfg(debug_assertions)]
    fn maybe_dump_stats(&self) {
        use itertools::Itertools;
        use std::cell::Cell;
        use std::time::{Duration, Instant};

        if !log_enabled!(log::Level::Trace) {
            return;
        }

        const RULE_TREE_STATS_INTERVAL: Duration = Duration::from_secs(2);

        thread_local! {
            pub static LAST_STATS: Cell<Instant> = Cell::new(Instant::now());
        };

        let should_dump = LAST_STATS.with(|s| {
            let now = Instant::now();
            if now.duration_since(s.get()) < RULE_TREE_STATS_INTERVAL {
                return false;
            }
            s.set(now);
            true
        });

        if !should_dump {
            return;
        }

        // Map from number-of-children to how many nodes have that many.
        let mut children_count = rustc_hash::FxHashMap::default();

        let mut stack = SmallVec::<[_; 32]>::new();
        stack.push(self.root.clone());
        while let Some(node) = stack.pop() {
            let children = node.p.children.read();
            *children_count.entry(children.len()).or_insert(0) += 1;
            for c in &*children {
                stack.push(unsafe { c.upgrade() });
            }
        }

        trace!("Rule tree stats:");
        let counts = children_count.keys().sorted();
        for count in counts {
            trace!(" {} - {}", count, children_count[count]);
        }
    }

    /// Steals the free list and drops its contents.
    ///
    /// `ptr` becomes the new free list head: `DANGLING_PTR` keeps the free
    /// list alive but empty (GC), null destroys it (tree drop).
    unsafe fn swap_free_list_and_gc(&self, ptr: *mut RuleNode) {
        let root = &self.root.p;

        debug_assert!(!root.next_free.load(Ordering::Relaxed).is_null());

        // Reset the approximate free count to zero, as we are going to steal
        // the free list.
        root.approximate_free_count.store(0, Ordering::Relaxed);

        // Steal the free list head. Memory loads on nodes while iterating it
        // must observe any prior changes that occurred so this requires
        // acquire ordering, but there are no writes that need to be kept
        // before this swap so there is no need for release.
        let mut head = root.next_free.swap(ptr, Ordering::Acquire);

        while head != RuleNode::DANGLING_PTR {
            debug_assert!(!head.is_null());

            let mut node = UnsafeBox::from_raw(head);

            // The root node cannot go on the free list.
            debug_assert!(node.root.is_some());

            // The refcount of nodes on the free list never goes below 1.
            debug_assert!(node.refcount.load(Ordering::Relaxed) > 0);

            // No one else is currently writing to that field. Get the address
            // of the next node in the free list and replace it with null,
            // other threads will now consider that this node is not on the
            // free list.
            head = node.next_free.swap(ptr::null_mut(), Ordering::Relaxed);

            // This release write synchronises with the acquire fence in
            // `WeakRuleNode::upgrade`, making sure that if `upgrade` observes
            // the decrement of the refcount to 0, it will also observe the
            // `node.next_free` swap to null above.
            if node.refcount.fetch_sub(1, Ordering::Release) == 1 {
                // And given it observed the null swap above, it will need
                // `pretend_to_be_on_free_list` to finish its job, writing
                // `RuleNode::DANGLING_PTR` in `node.next_free`.
                RuleNode::pretend_to_be_on_free_list(&node);

                // Drop this node now that we just observed its refcount going
                // down to zero.
                RuleNode::drop_without_free_list(&mut node);
            }
        }
    }
}
200
/// The number of RuleNodes added to the free list before we will consider
/// doing a GC when calling maybe_gc().  (The value is copied from Gecko,
/// where it likely did not result from a rigorous performance analysis.)
/// Compared against `approximate_free_count` in `RuleTree::maybe_gc`.
const RULE_TREE_GC_INTERVAL: usize = 300;
205
/// A node in the rule tree.
struct RuleNode {
    /// The root node. Only the root has no root pointer, for obvious reasons.
    root: Option<WeakRuleNode>,

    /// The parent rule node. Only the root has no parent.
    parent: Option<StrongRuleNode>,

    /// The actual style source, either coming from a selector in a StyleRule,
    /// or a raw property declaration block (like the style attribute).
    ///
    /// None for the root node.
    source: Option<StyleSource>,

    /// The cascade level + layer order this rule is positioned at.
    cascade_priority: CascadePriority,

    /// The refcount of this node.
    ///
    /// Starts at one. Incremented in `StrongRuleNode::clone` and
    /// `WeakRuleNode::upgrade`. Decremented in `StrongRuleNode::drop`
    /// and `RuleTree::swap_free_list_and_gc`.
    ///
    /// If a non-root node's refcount reaches zero, it is incremented back to at
    /// least one in `RuleNode::pretend_to_be_on_free_list` until the caller who
    /// observed it dropping to zero had a chance to try to remove it from its
    /// parent's children list.
    ///
    /// The refcount should never be decremented to zero if the value in
    /// `next_free` is not null.
    refcount: AtomicUsize,

    /// Only used for the root, stores the number of free rule nodes that are
    /// around.
    approximate_free_count: AtomicUsize,

    /// The children of a given rule node. Children remove themselves from here
    /// when they go away (see `RuleNode::drop_without_free_list`).
    children: RwLock<Map<ChildKey, WeakRuleNode>>,

    /// This field has two different meanings depending on whether this is the
    /// root node or not.
    ///
    /// If it is the root, it represents the head of the free list. It may be
    /// null, which means the free list is gone because the tree was dropped,
    /// and it may be `RuleNode::DANGLING_PTR`, which means the free list is
    /// empty.
    ///
    /// If it is not the root node, this field is either null if the node is
    /// not on the free list, `RuleNode::DANGLING_PTR` if it is the last item
    /// on the free list or the node is pretending to be on the free list, or
    /// any valid non-null pointer representing the next item on the free list
    /// after this one.
    ///
    /// See `RuleNode::push_on_free_list`, `swap_free_list_and_gc`, and
    /// `WeakRuleNode::upgrade`.
    ///
    /// Two threads should never attempt to put the same node on the free list
    /// both at the same time.
    next_free: AtomicPtr<RuleNode>,
}
267
268// On Gecko builds, hook into the leak checking machinery.
#[cfg(feature = "gecko_refcount_logging")]
mod gecko_leak_checking {
    use super::RuleNode;
    use std::mem::size_of;
    use std::os::raw::{c_char, c_void};

    extern "C" {
        fn NS_LogCtor(aPtr: *mut c_void, aTypeName: *const c_char, aSize: u32);
        fn NS_LogDtor(aPtr: *mut c_void, aTypeName: *const c_char, aSize: u32);
    }
    // NUL-terminated type name passed to the C-side logging functions.
    static NAME: &'static [u8] = b"RuleNode\0";

    /// Logs the creation of a heap-allocated object to Gecko's leak-checking machinery.
    pub(super) fn log_ctor(ptr: *const RuleNode) {
        let s = NAME as *const [u8] as *const u8 as *const c_char;
        unsafe {
            NS_LogCtor(ptr as *mut c_void, s, size_of::<RuleNode>() as u32);
        }
    }

    /// Logs the destruction of a heap-allocated object to Gecko's leak-checking machinery.
    pub(super) fn log_dtor(ptr: *const RuleNode) {
        let s = NAME as *const [u8] as *const u8 as *const c_char;
        unsafe {
            NS_LogDtor(ptr as *mut c_void, s, size_of::<RuleNode>() as u32);
        }
    }
}
297
/// Logs creation of a rule node; no-op unless Gecko refcount logging is
/// enabled.
#[inline(always)]
fn log_new(_ptr: *const RuleNode) {
    #[cfg(feature = "gecko_refcount_logging")]
    gecko_leak_checking::log_ctor(_ptr);
}
303
/// Logs destruction of a rule node; no-op unless Gecko refcount logging is
/// enabled.
#[inline(always)]
fn log_drop(_ptr: *const RuleNode) {
    #[cfg(feature = "gecko_refcount_logging")]
    gecko_leak_checking::log_dtor(_ptr);
}
309
impl RuleNode {
    /// Sentinel pointer doing double duty: on the root it marks an empty
    /// (but live) free list; on non-root nodes it marks the end of the free
    /// list or a node "pretending" to be on it.
    const DANGLING_PTR: *mut Self = ptr::NonNull::dangling().as_ptr();

    /// Creates a new non-root node with an initial refcount of 1, not on the
    /// free list.
    unsafe fn new(
        root: WeakRuleNode,
        parent: StrongRuleNode,
        source: StyleSource,
        cascade_priority: CascadePriority,
    ) -> Self {
        // `root` must actually be the root: only the root has no parent.
        debug_assert!(root.p.parent.is_none());
        RuleNode {
            root: Some(root),
            parent: Some(parent),
            source: Some(source),
            cascade_priority,
            refcount: AtomicUsize::new(1),
            children: Default::default(),
            approximate_free_count: AtomicUsize::new(0),
            next_free: AtomicPtr::new(ptr::null_mut()),
        }
    }

    /// Creates the root node. Its free list head starts out empty
    /// (`DANGLING_PTR`) rather than gone (null).
    fn root() -> Self {
        RuleNode {
            root: None,
            parent: None,
            source: None,
            cascade_priority: CascadePriority::new(
                CascadeLevel::new(CascadeOrigin::UA),
                LayerOrder::root(),
                RuleCascadeFlags::empty(),
            ),
            refcount: AtomicUsize::new(1),
            approximate_free_count: AtomicUsize::new(0),
            children: Default::default(),
            next_free: AtomicPtr::new(RuleNode::DANGLING_PTR),
        }
    }

    /// Returns the key identifying this node in its parent's children map.
    ///
    /// Panics if called on the root node, which has no source.
    fn key(&self) -> ChildKey {
        ChildKey(
            self.cascade_priority,
            self.source
                .as_ref()
                .expect("Called key() on the root node")
                .key(),
        )
    }

    /// Drops a node without ever putting it on the free list.
    ///
    /// Note that the node may not be dropped if we observe that its refcount
    /// isn't zero anymore when we write-lock its parent's children map to
    /// remove it.
    ///
    /// This loops over parents of dropped nodes if their own refcount reaches
    /// zero to avoid recursion when dropping deep hierarchies of nodes.
    ///
    /// For non-root nodes, this should always be preceded by a call of
    /// `RuleNode::pretend_to_be_on_free_list`.
    unsafe fn drop_without_free_list(this: &mut UnsafeBox<Self>) {
        // We clone the box and shadow the original one to be able to loop
        // over its ancestors if they also need to be dropped.
        let mut this = UnsafeBox::clone(this);
        loop {
            // If the node has a parent, we need to remove it from its parent's
            // children list.
            if let Some(parent) = this.parent.as_ref() {
                // Non-null because `pretend_to_be_on_free_list` ran first.
                debug_assert!(!this.next_free.load(Ordering::Relaxed).is_null());

                // We lock the parent's children list, which means no other
                // thread will have any more opportunity to resurrect the node
                // anymore.
                let mut children = parent.p.children.write();

                this.next_free.store(ptr::null_mut(), Ordering::Relaxed);

                // We decrement the counter to remove the "pretend to be
                // on the free list" reference.
                let old_refcount = this.refcount.fetch_sub(1, Ordering::Release);
                debug_assert!(old_refcount != 0);
                if old_refcount != 1 {
                    // Other threads resurrected this node and those references
                    // are still alive, we have nothing to do anymore.
                    return;
                }

                // We finally remove the node from its parent's children list,
                // there are now no other references to it and it cannot
                // be resurrected anymore even after we unlock the list.
                debug!(
                    "Remove from child list: {:?}, parent: {:?}",
                    this.as_mut_ptr(),
                    this.parent.as_ref().map(|p| p.p.as_mut_ptr())
                );
                let weak = children.remove(&this.key(), |node| node.p.key()).unwrap();
                assert_eq!(weak.p.as_mut_ptr(), this.as_mut_ptr());
            } else {
                // Root node: it is never on (or pretending to be on) the
                // free list when it gets here.
                debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
                debug_assert_eq!(this.refcount.load(Ordering::Relaxed), 0);
            }

            // We are going to drop this node for good this time, as per the
            // usual refcounting protocol we need an acquire fence here before
            // we run the destructor.
            //
            // See https://github.com/rust-lang/rust/pull/41714#issuecomment-298996916
            // for why it doesn't matter whether this is a load or a fence.
            atomic::fence(Ordering::Acquire);

            // Remove the parent reference from the child to avoid
            // recursively dropping it and putting it on the free list.
            let parent = UnsafeBox::deref_mut(&mut this).parent.take();

            // We now drop the actual box and its contents, no one should
            // access the current value in `this` anymore.
            log_drop(&*this);
            UnsafeBox::drop(&mut this);

            if let Some(parent) = parent {
                // We will attempt to drop the node's parent without the free
                // list, so we clone the inner unsafe box and forget the
                // original parent to avoid running its `StrongRuleNode`
                // destructor which would attempt to use the free list if it
                // still exists.
                this = UnsafeBox::clone(&parent.p);
                mem::forget(parent);
                if this.refcount.fetch_sub(1, Ordering::Release) == 1 {
                    debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
                    if this.root.is_some() {
                        RuleNode::pretend_to_be_on_free_list(&this);
                    }
                    // Parent also reached refcount zero, we loop to drop it.
                    continue;
                }
            }

            return;
        }
    }

    /// Pushes this node on the tree's free list. Returns false if the free list
    /// is gone. Should only be called after we decremented a node's refcount
    /// to zero and pretended to be on the free list.
    unsafe fn push_on_free_list(this: &UnsafeBox<Self>) -> bool {
        let root = &this.root.as_ref().unwrap().p;

        debug_assert!(this.refcount.load(Ordering::Relaxed) > 0);
        debug_assert_eq!(this.next_free.load(Ordering::Relaxed), Self::DANGLING_PTR);

        // Increment the approximate free count by one.
        root.approximate_free_count.fetch_add(1, Ordering::Relaxed);

        // If the compare-exchange operation fails in the loop, we will retry
        // with the new head value, so this can be a relaxed load.
        let mut head = root.next_free.load(Ordering::Relaxed);

        // A null head means the tree was dropped and the free list is gone.
        while !head.is_null() {
            // Two threads can never attempt to push the same node on the free
            // list both at the same time, so whoever else pushed a node on the
            // free list cannot have done so with this node.
            debug_assert_ne!(head, this.as_mut_ptr());

            // Store the current head of the free list in this node.
            this.next_free.store(head, Ordering::Relaxed);

            // Any thread acquiring the free list must observe the previous
            // next_free changes that occurred, hence the release ordering
            // on success.
            match root.next_free.compare_exchange_weak(
                head,
                this.as_mut_ptr(),
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // This node is now on the free list, caller should not use
                    // the node anymore.
                    return true;
                },
                Err(new_head) => head = new_head,
            }
        }

        // Tree was dropped and free list has been destroyed. We did not push
        // this node on the free list but we still pretend to be on the free
        // list to be ready to call `drop_without_free_list`.
        false
    }

    /// Makes the node pretend to be on the free list. This will increment the
    /// refcount by 1 and store `Self::DANGLING_PTR` in `next_free`. This
    /// method should only be called after caller decremented the refcount to
    /// zero, with the null pointer stored in `next_free`.
    unsafe fn pretend_to_be_on_free_list(this: &UnsafeBox<Self>) {
        debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
        this.refcount.fetch_add(1, Ordering::Relaxed);
        // Release so that `WeakRuleNode::upgrade`'s spin loop observes the
        // refcount bump once it sees the non-null `next_free`.
        this.next_free.store(Self::DANGLING_PTR, Ordering::Release);
    }

    /// Returns a raw pointer to this node, used for identity comparisons and
    /// free list links.
    fn as_mut_ptr(&self) -> *mut RuleNode {
        self as *const RuleNode as *mut RuleNode
    }
}
514
/// A weak reference to a rule node, as stored in a parent's children map.
///
/// Holding one does not contribute to the node's refcount; see
/// `WeakRuleNode::upgrade`.
pub(crate) struct WeakRuleNode {
    p: UnsafeBox<RuleNode>,
}
518
/// A strong reference to a rule node.
pub struct StrongRuleNode {
    // Owns one reference in `RuleNode::refcount`, released in `Drop`.
    p: UnsafeBox<RuleNode>,
}

// Rule node memory is measured through `RuleTree`'s `MallocSizeOf` impl, so
// a strong handle itself reports zero.
#[cfg(feature = "servo")]
malloc_size_of::malloc_size_of_is_0!(StrongRuleNode);
526
527impl StrongRuleNode {
528    fn new(n: Box<RuleNode>) -> Self {
529        debug_assert_eq!(n.parent.is_none(), !n.source.is_some());
530
531        log_new(&*n);
532
533        debug!("Creating rule node: {:p}", &*n);
534
535        Self {
536            p: UnsafeBox::from_box(n),
537        }
538    }
539
540    unsafe fn from_unsafe_box(p: UnsafeBox<RuleNode>) -> Self {
541        Self { p }
542    }
543
544    unsafe fn downgrade(&self) -> WeakRuleNode {
545        WeakRuleNode {
546            p: UnsafeBox::clone(&self.p),
547        }
548    }
549
550    /// Get the parent rule node of this rule node.
551    pub fn parent(&self) -> Option<&StrongRuleNode> {
552        self.p.parent.as_ref()
553    }
554
555    pub(super) fn ensure_child(
556        &self,
557        root: &StrongRuleNode,
558        source: StyleSource,
559        cascade_priority: CascadePriority,
560    ) -> StrongRuleNode {
561        use parking_lot::RwLockUpgradableReadGuard;
562
563        debug_assert!(
564            self.p.cascade_priority <= cascade_priority,
565            "Should be ordered (instead {:?} > {:?}), from {:?} and {:?}",
566            self.p.cascade_priority,
567            cascade_priority,
568            self.p.source,
569            source,
570        );
571
572        let key = ChildKey(cascade_priority, source.key());
573        let children = self.p.children.upgradable_read();
574        if let Some(child) = children.get(&key, |node| node.p.key()) {
575            // Sound to call because we read-locked the parent's children.
576            return unsafe { child.upgrade() };
577        }
578        let mut children = RwLockUpgradableReadGuard::upgrade(children);
579        match children.entry(key, |node| node.p.key()) {
580            Entry::Occupied(child) => {
581                // Sound to call because we write-locked the parent's children.
582                unsafe { child.upgrade() }
583            },
584            Entry::Vacant(entry) => unsafe {
585                let node = StrongRuleNode::new(Box::new(RuleNode::new(
586                    root.downgrade(),
587                    self.clone(),
588                    source,
589                    cascade_priority,
590                )));
591                // Sound to call because we still own a strong reference to
592                // this node, through the `node` variable itself that we are
593                // going to return to the caller.
594                entry.insert(node.downgrade());
595                node
596            },
597        }
598    }
599
600    /// Get the style source corresponding to this rule node. May return `None`
601    /// if it's the root node, which means that the node hasn't matched any
602    /// rules.
603    pub fn style_source(&self) -> Option<&StyleSource> {
604        self.p.source.as_ref()
605    }
606
607    /// The cascade priority.
608    #[inline]
609    pub fn cascade_priority(&self) -> CascadePriority {
610        self.p.cascade_priority
611    }
612
613    /// The cascade level.
614    #[inline]
615    pub fn cascade_level(&self) -> CascadeLevel {
616        self.cascade_priority().cascade_level()
617    }
618
619    /// The importance.
620    #[inline]
621    pub fn importance(&self) -> crate::properties::Importance {
622        self.cascade_level().importance()
623    }
624
625    /// Returns whether this node has any child, only intended for testing
626    /// purposes.
627    pub unsafe fn has_children_for_testing(&self) -> bool {
628        !self.p.children.read().is_empty()
629    }
630
631    pub(super) fn dump<W: Write>(&self, guards: &StylesheetGuards, writer: &mut W, indent: usize) {
632        const INDENT_INCREMENT: usize = 4;
633
634        for _ in 0..indent {
635            let _ = write!(writer, " ");
636        }
637
638        let _ = writeln!(
639            writer,
640            " - {:p} (ref: {:?}, parent: {:?})",
641            &*self.p,
642            self.p.refcount.load(Ordering::Relaxed),
643            self.parent().map(|p| &*p.p as *const RuleNode)
644        );
645
646        for _ in 0..indent {
647            let _ = write!(writer, " ");
648        }
649
650        if let Some(source) = self.style_source() {
651            source.dump(self.cascade_level().guard(guards), writer);
652        } else {
653            if indent != 0 {
654                warn!("How has this happened?");
655            }
656            let _ = write!(writer, "(root)");
657        }
658
659        let _ = write!(writer, "\n");
660        for child in &*self.p.children.read() {
661            unsafe {
662                child
663                    .upgrade()
664                    .dump(guards, writer, indent + INDENT_INCREMENT);
665            }
666        }
667    }
668}
669
impl Clone for StrongRuleNode {
    fn clone(&self) -> Self {
        debug!(
            "{:p}: {:?}+",
            &*self.p,
            self.p.refcount.load(Ordering::Relaxed)
        );
        debug_assert!(self.p.refcount.load(Ordering::Relaxed) > 0);
        // Cloning an existing strong reference means the refcount is at least
        // 1 already, so a relaxed increment suffices; synchronisation happens
        // on the release decrements in `Drop`.
        self.p.refcount.fetch_add(1, Ordering::Relaxed);
        unsafe { StrongRuleNode::from_unsafe_box(UnsafeBox::clone(&self.p)) }
    }
}
682
impl Drop for StrongRuleNode {
    #[cfg_attr(feature = "servo", allow(unused_mut))]
    fn drop(&mut self) {
        let node = &*self.p;
        debug!("{:p}: {:?}-", node, node.refcount.load(Ordering::Relaxed));
        debug!(
            "Dropping node: {:p}, root: {:?}, parent: {:?}",
            node,
            node.root.as_ref().map(|r| &*r.p as *const RuleNode),
            node.parent.as_ref().map(|p| &*p.p as *const RuleNode)
        );

        // Release the reference; we only have cleanup to do if we just
        // dropped the count to zero.
        let should_drop = {
            debug_assert!(node.refcount.load(Ordering::Relaxed) > 0);
            node.refcount.fetch_sub(1, Ordering::Release) == 1
        };

        if !should_drop {
            // The refcount didn't even drop zero yet, there is nothing for us
            // to do anymore.
            return;
        }

        unsafe {
            if node.root.is_some() {
                // This is a non-root node and we just observed the refcount
                // dropping to zero, we need to pretend to be on the free list
                // to unstuck any thread who tried to resurrect this node first
                // through `WeakRuleNode::upgrade`.
                RuleNode::pretend_to_be_on_free_list(&self.p);

                // Attempt to push the node on the free list. This may fail
                // if the free list is gone.
                if RuleNode::push_on_free_list(&self.p) {
                    return;
                }
            }

            // Either this was the last reference of the root node, or the
            // tree rule is gone and there is no free list anymore. Drop the
            // node.
            RuleNode::drop_without_free_list(&mut self.p);
        }
    }
}
728
impl WeakRuleNode {
    /// Upgrades this weak node reference, returning a strong one.
    ///
    /// Must be called with items stored in a node's children list. The children
    /// list must at least be read-locked when this is called.
    unsafe fn upgrade(&self) -> StrongRuleNode {
        debug!("Upgrading weak node: {:p}", &*self.p);

        if self.p.refcount.fetch_add(1, Ordering::Relaxed) == 0 {
            // We observed a refcount of 0, we need to wait for this node to
            // be put on the free list. Resetting the `next_free` pointer to
            // null is only done in `RuleNode::drop_without_free_list`, just
            // before a release refcount decrement, so this acquire fence here
            // makes sure that we observed the write to null before we loop
            // until there is a non-null value.
            //
            // The spin ends once `pretend_to_be_on_free_list` publishes a
            // non-null (`DANGLING_PTR`) value with release ordering.
            atomic::fence(Ordering::Acquire);
            while self.p.next_free.load(Ordering::Relaxed).is_null() {}
        }
        StrongRuleNode::from_unsafe_box(UnsafeBox::clone(&self.p))
    }
}
750
751impl fmt::Debug for StrongRuleNode {
752    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
753        (&*self.p as *const RuleNode).fmt(f)
754    }
755}
756
757impl Eq for StrongRuleNode {}
758impl PartialEq for StrongRuleNode {
759    fn eq(&self, other: &Self) -> bool {
760        &*self.p as *const RuleNode == &*other.p
761    }
762}
763
764impl hash::Hash for StrongRuleNode {
765    fn hash<H>(&self, state: &mut H)
766    where
767        H: hash::Hasher,
768    {
769        (&*self.p as *const RuleNode).hash(state)
770    }
771}
772
// Large pages generate thousands of RuleNode objects, so keep the struct
// small.
size_of_test!(RuleNode, 80);
// StrongRuleNode should be pointer-sized even inside an option — presumably
// via a non-null niche in `UnsafeBox`; confirm in unsafe_box.rs.
size_of_test!(Option<StrongRuleNode>, 8);