servo_arc/lib.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Fork of Arc for Servo. This has the following advantages over std::sync::Arc:
//!
//! * We don't waste storage on the weak reference count.
//! * We don't do extra read-modify-write (RMW) operations to handle the
//!   possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//! * We have support for dynamically-sized types (see from_header_and_iter).
//! * We have support for thin arcs to unsized types (see ThinArc).
//! * We have support for references to static data, which don't do any
//!   refcounting.
//!
//! [1]: https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
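//!
//! Basic usage mirrors `std::sync::Arc`; a minimal sketch (the doctest is
//! ignored because refcount logging can't be disabled in doc tests, see the
//! note on `UniqueArc` below):
//!
//! ```rust,ignore
//! use servo_arc::Arc;
//!
//! let a = Arc::new(String::from("hello"));
//! let b = a.clone(); // bumps the refcount; there is no weak count
//! assert!(Arc::ptr_eq(&a, &b));
//! ```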

// The semantics of `Arc` are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]

#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::alloc::{self, Layout};
use std::borrow;
use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem::{self, align_of, size_of};
use std::ops::{Deref, DerefMut};
use std::os::raw::c_void;
use std::process;
use std::ptr;
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::{isize, usize};

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// Special refcount value that means the data is not reference counted,
/// and that the `Arc` is really acting as a read-only static reference.
const STATIC_REFCOUNT: usize = usize::MAX;

/// An atomically reference counted shared pointer.
///
/// See the documentation for [`Arc`] in the standard library. Unlike the
/// standard library `Arc`, this `Arc` does not support weak reference counting.
///
/// See the discussion in https://github.com/rust-lang/rust/pull/60594 for the
/// usage of PhantomData.
///
/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
pub struct Arc<T: ?Sized> {
    p: ptr::NonNull<ArcInner<T>>,
    phantom: PhantomData<T>,
}

/// An `Arc` that is known to be uniquely owned.
///
/// When `Arc`s are constructed, they are known to be
/// uniquely owned. In such a case it is safe to mutate
/// the contents of the `Arc`. Normally, one would just handle
/// this by mutating the data on the stack before allocating the
/// `Arc`; however, it's possible the data is large or unsized
/// and you need to heap-allocate it earlier in such a way
/// that it can be freely converted into a regular `Arc` once you're
/// done.
///
/// `UniqueArc` exists for this purpose: when constructed, it performs
/// the same allocations necessary for an `Arc`, but it allows mutable access.
/// Once the mutation is finished, you can call `.shareable()` and get a regular `Arc`
/// out of it.
///
/// Ignore the doctest below; there's no way to skip building with refcount
/// logging during doc tests (see rust-lang/rust#45599).
///
/// ```rust,ignore
/// # use servo_arc::UniqueArc;
/// let data = [1, 2, 3, 4, 5];
/// let mut x = UniqueArc::new(data);
/// x[4] = 7; // mutate!
/// let y = x.shareable(); // y is an Arc<T>
/// ```
pub struct UniqueArc<T: ?Sized>(Arc<T>);

impl<T> UniqueArc<T> {
    /// Construct a new UniqueArc.
    #[inline]
    pub fn new(data: T) -> Self {
        UniqueArc(Arc::new(data))
    }

    /// Construct an uninitialized arc.
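    ///
    /// A minimal sketch of the intended use, assuming a toolchain with
    /// `MaybeUninit::write` (ignored for the same refcount-logging reason as
    /// the `UniqueArc` doctest above):
    ///
    /// ```rust,ignore
    /// # use servo_arc::UniqueArc;
    /// let mut x = UniqueArc::<u32>::new_uninit();
    /// x.write(5); // `MaybeUninit::write`, reached through `DerefMut`
    /// let x = unsafe { UniqueArc::assume_init(x) };
    /// assert_eq!(*x.shareable(), 5);
    /// ```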
    #[inline]
    pub fn new_uninit() -> UniqueArc<mem::MaybeUninit<T>> {
        unsafe {
            let layout = Layout::new::<ArcInner<mem::MaybeUninit<T>>>();
            let ptr = alloc::alloc(layout);
            let mut p = ptr::NonNull::new(ptr)
                .unwrap_or_else(|| alloc::handle_alloc_error(layout))
                .cast::<ArcInner<mem::MaybeUninit<T>>>();
            ptr::write(&mut p.as_mut().count, atomic::AtomicUsize::new(1));
            #[cfg(feature = "track_alloc_size")]
            ptr::write(&mut p.as_mut().alloc_size, layout.size());

            #[cfg(feature = "gecko_refcount_logging")]
            {
                NS_LogCtor(p.as_ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8)
            }

            UniqueArc(Arc {
                p,
                phantom: PhantomData,
            })
        }
    }

    /// Convert to a shareable Arc<T> once we're done mutating it.
    #[inline]
    pub fn shareable(self) -> Arc<T> {
        self.0
    }
}

impl<T> UniqueArc<mem::MaybeUninit<T>> {
    /// Convert to an initialized Arc.
    #[inline]
    pub unsafe fn assume_init(this: Self) -> UniqueArc<T> {
        UniqueArc(Arc {
            p: mem::ManuallyDrop::new(this).0.p.cast(),
            phantom: PhantomData,
        })
    }
}

impl<T> Deref for UniqueArc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.0
    }
}

impl<T> DerefMut for UniqueArc<T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know this to be uniquely owned.
        unsafe { &mut (*self.0.ptr()).data }
    }
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

/// The object allocated by an Arc<T>.
///
/// See https://github.com/mozilla/cbindgen/issues/937 for the derive-{eq,neq}=false. We don't
/// use those anyway, so we can just disable them.
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
struct ArcInner<T: ?Sized> {
    count: atomic::AtomicUsize,
    // NOTE(emilio): This needs to be here so that HeaderSlice<> is deallocated properly if the
    // allocator relies on getting the right Layout. We don't need to track the right alignment,
    // since we know that statically.
    //
    // This member could be completely avoided once the min_specialization feature is stable (by
    // implementing a trait for HeaderSlice that gives you the right layout). For now, servo-only,
    // since Gecko doesn't need it (its allocator doesn't need the size for the alignments we care
    // about). See https://github.com/rust-lang/rust/issues/31844.
    #[cfg(feature = "track_alloc_size")]
    alloc_size: usize,
    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

/// Computes the offset of the data field within ArcInner.
fn data_offset<T>() -> usize {
    let size = size_of::<ArcInner<()>>();
    let align = align_of::<T>();
    // https://github.com/rust-lang/rust/blob/1.36.0/src/libcore/alloc.rs#L187-L207
    size.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1)
}
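
// For example (a sketch, assuming a 64-bit target without the
// "track_alloc_size" feature): size_of::<ArcInner<()>>() is 8 (just the atomic
// count), so for T = u64 (align 8) the offset is (8 + 8 - 1) & !7 == 8; the
// data starts immediately after the refcount.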

impl<T> Arc<T> {
    /// Construct an `Arc<T>`.
    #[inline]
    pub fn new(data: T) -> Self {
        let layout = Layout::new::<ArcInner<T>>();
        let p = unsafe {
            let ptr = ptr::NonNull::new(alloc::alloc(layout))
                .unwrap_or_else(|| alloc::handle_alloc_error(layout))
                .cast::<ArcInner<T>>();
            ptr::write(
                ptr.as_ptr(),
                ArcInner {
                    count: atomic::AtomicUsize::new(1),
                    #[cfg(feature = "track_alloc_size")]
                    alloc_size: layout.size(),
                    data,
                },
            );
            ptr
        };

        #[cfg(feature = "gecko_refcount_logging")]
        unsafe {
            // FIXME(emilio): Would be so amazing to have
            // std::intrinsics::type_name() around, so that we could also report
            // a real size.
            NS_LogCtor(p.as_ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8);
        }

        Arc {
            p,
            phantom: PhantomData,
        }
    }

    /// Construct an intentionally-leaked arc.
    #[inline]
    pub fn new_leaked(data: T) -> Self {
        let arc = Self::new(data);
        arc.mark_as_intentionally_leaked();
        arc
    }

    /// Convert the Arc<T> to a raw pointer, suitable for use across FFI.
    ///
    /// Note: This returns a pointer to the data T, which is offset in the allocation.
    #[inline]
    pub fn into_raw(this: Self) -> *const T {
        let ptr = unsafe { &((*this.ptr()).data) as *const _ };
        mem::forget(this);
        ptr
    }

    /// Reconstruct the Arc<T> from a raw pointer obtained from into_raw().
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    #[inline]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        let ptr = (ptr as *const u8).sub(data_offset::<T>());
        Arc {
            p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>),
            phantom: PhantomData,
        }
    }

    /// Like from_raw, but returns an addrefed arc instead.
    #[inline]
    pub unsafe fn from_raw_addrefed(ptr: *const T) -> Self {
        let arc = Self::from_raw(ptr);
        mem::forget(arc.clone());
        arc
    }

    /// Create a new static Arc<T> (one that won't reference count the object)
    /// and place it in the allocation provided by the specified `alloc`
    /// function.
    ///
    /// `alloc` must return a pointer into a static allocation suitable for
    /// storing data with the `Layout` passed into it. The pointer returned by
    /// `alloc` will not be freed.
    #[inline]
    pub unsafe fn new_static<F>(alloc: F, data: T) -> Arc<T>
    where
        F: FnOnce(Layout) -> *mut u8,
    {
        let layout = Layout::new::<ArcInner<T>>();
        let ptr = alloc(layout) as *mut ArcInner<T>;

        let x = ArcInner {
            count: atomic::AtomicUsize::new(STATIC_REFCOUNT),
            #[cfg(feature = "track_alloc_size")]
            alloc_size: layout.size(),
            data,
        };

        ptr::write(ptr, x);

        Arc {
            p: ptr::NonNull::new_unchecked(ptr),
            phantom: PhantomData,
        }
    }

    /// Produce a pointer to the data that can be converted back
    /// to an Arc. This is basically an `&Arc<T>`, without the extra indirection.
    /// It has the benefits of an `&T` but also knows about the underlying refcount
    /// and can be converted into more `Arc<T>`s if necessary.
    #[inline]
    pub fn borrow_arc<'a>(&'a self) -> ArcBorrow<'a, T> {
        ArcBorrow(&**self)
    }

    /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
    /// reporting.
    ///
    /// If this is a static reference, this returns null.
    pub fn heap_ptr(&self) -> *const c_void {
        if self.inner().count.load(Relaxed) == STATIC_REFCOUNT {
            ptr::null()
        } else {
            self.p.as_ptr() as *const ArcInner<T> as *const c_void
        }
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    #[inline(always)]
    fn record_drop(&self) {
        #[cfg(feature = "gecko_refcount_logging")]
        unsafe {
            NS_LogDtor(self.ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8);
        }
    }

    /// Marks this `Arc` as intentionally leaked for the purposes of refcount
    /// logging.
    ///
    /// It's a logic error to call this more than once, but it's not unsafe, as
    /// it'd just report negative leaks.
    #[inline(always)]
    pub fn mark_as_intentionally_leaked(&self) {
        self.record_drop();
    }

    // Non-inlined part of `drop`. Just invokes the destructor and calls the
    // refcount logging machinery if enabled.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        self.record_drop();
        let inner = self.ptr();

        let layout = Layout::for_value(&*inner);
        #[cfg(feature = "track_alloc_size")]
        let layout = Layout::from_size_align_unchecked((*inner).alloc_size, layout.align());

        std::ptr::drop_in_place(inner);
        alloc::dealloc(inner as *mut _, layout);
    }

    /// Test pointer equality between the two Arcs, i.e. they must be the _same_
    /// allocation.
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.raw_ptr() == other.raw_ptr()
    }

    fn ptr(&self) -> *mut ArcInner<T> {
        self.p.as_ptr()
    }

    /// Returns a raw pointer to the underlying allocation.
    pub fn raw_ptr(&self) -> ptr::NonNull<()> {
        self.p.cast()
    }
}

#[cfg(feature = "gecko_refcount_logging")]
extern "C" {
    fn NS_LogCtor(
        aPtr: *mut std::os::raw::c_void,
        aTypeName: *const std::os::raw::c_char,
        aSize: u32,
    );
    fn NS_LogDtor(
        aPtr: *mut std::os::raw::c_void,
        aTypeName: *const std::os::raw::c_char,
        aSize: u32,
    );
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // NOTE(emilio): If you change anything here, make sure that the
        // implementation in layout/style/ServoStyleConstsInlines.h matches!
        //
        // Using a relaxed ordering to check for STATIC_REFCOUNT is safe, since
        // `count` never changes between STATIC_REFCOUNT and other values.
        if self.inner().count.load(Relaxed) != STATIC_REFCOUNT {
            // Using a relaxed ordering is alright here, as knowledge of the
            // original reference prevents other threads from erroneously deleting
            // the object.
            //
            // As explained in the [Boost documentation][1], increasing the
            // reference counter can always be done with memory_order_relaxed: new
            // references to an object can only be formed from an existing
            // reference, and passing an existing reference from one thread to
            // another must already provide any required synchronization.
            //
            // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
            let old_size = self.inner().count.fetch_add(1, Relaxed);

            // However, we need to guard against massive refcounts in case someone
            // is `mem::forget`ing Arcs. If we don't do this, the count can overflow
            // and users will use-after-free. We racily saturate to `isize::MAX` on
            // the assumption that there aren't ~2 billion threads incrementing
            // the reference count at once. This branch will never be taken in
            // any realistic program.
            //
            // We abort because such a program is incredibly degenerate, and we
            // don't care to support it.
            if old_size > MAX_REFCOUNT {
                process::abort();
            }
        }

        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(self.ptr()),
                phantom: PhantomData,
            }
        }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference to the `Arc`, cloning if necessary.
    ///
    /// This is functionally equivalent to [`Arc::make_mut`][mm] from the standard library.
    ///
    /// If this `Arc` is uniquely owned, `make_mut()` will provide a mutable
    /// reference to the contents. If not, `make_mut()` will create a _new_ `Arc`
    /// with a copy of the contents, update `this` to point to it, and provide
    /// a mutable reference to its contents.
    ///
    /// This is useful for implementing copy-on-write schemes where you wish to
    /// avoid copying things if your `Arc` is not shared.
    ///
    /// [mm]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#method.make_mut
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone.
            *this = Arc::new((**this).clone());
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
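    ///
    /// A minimal sketch (ignored, as with the other doctests here):
    ///
    /// ```rust,ignore
    /// # use servo_arc::Arc;
    /// let mut a = Arc::new(5u32);
    /// *Arc::get_mut(&mut a).unwrap() += 1; // uniquely owned, so this succeeds
    /// let b = a.clone();
    /// assert!(Arc::get_mut(&mut a).is_none()); // now shared
    /// ```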
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    /// Whether or not the `Arc` is a static reference.
    #[inline]
    pub fn is_static(&self) -> bool {
        // Using a relaxed ordering to check for STATIC_REFCOUNT is safe, since
        // `count` never changes between STATIC_REFCOUNT and other values.
        self.inner().count.load(Relaxed) == STATIC_REFCOUNT
    }

    /// Whether or not the `Arc` is uniquely owned (is the refcount 1?) and not
    /// a static reference.
    #[inline]
    pub fn is_unique(&self) -> bool {
        // See the extensive discussion in [1] for why this needs to be Acquire.
        //
        // [1] https://github.com/servo/servo/issues/21186
        self.inner().count.load(Acquire) == 1
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // NOTE(emilio): If you change anything here, make sure that the
        // implementation in layout/style/ServoStyleConstsInlines.h matches!
        if self.is_static() {
            return;
        }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        Self::ptr_eq(self, other) || *(*self) == *(*other)
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        !Self::ptr_eq(self, other) && *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    #[inline]
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    #[inline]
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        &**self
    }
}

unsafe impl<T: ?Sized> StableDeref for Arc<T> {}
unsafe impl<T: ?Sized> CloneStableDeref for Arc<T> {}

#[cfg(feature = "servo")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> {
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}

/// Structure to allow Arc-managing some fixed-sized data and a variably-sized
/// slice in a single allocation.
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[derive(Eq)]
#[repr(C)]
pub struct HeaderSlice<H, T> {
    /// The fixed-sized data.
    pub header: H,

    /// The length of the slice at our end.
    len: usize,

    /// The dynamically-sized data.
    data: [T; 0],
}

impl<H: PartialEq, T: PartialEq> PartialEq for HeaderSlice<H, T> {
    fn eq(&self, other: &Self) -> bool {
        self.header == other.header && self.slice() == other.slice()
    }
}

impl<H, T> Drop for HeaderSlice<H, T> {
    fn drop(&mut self) {
        unsafe {
            let mut ptr = self.data_mut();
            for _ in 0..self.len {
                std::ptr::drop_in_place(ptr);
                ptr = ptr.offset(1);
            }
        }
    }
}

impl<H: fmt::Debug, T: fmt::Debug> fmt::Debug for HeaderSlice<H, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("HeaderSlice")
            .field("header", &self.header)
            .field("slice", &self.slice())
            .finish()
    }
}

impl<H, T> HeaderSlice<H, T> {
    /// Returns the dynamically sized slice in this HeaderSlice.
    #[inline(always)]
    pub fn slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.data(), self.len) }
    }

    #[inline(always)]
    fn data(&self) -> *const T {
        std::ptr::addr_of!(self.data) as *const _
    }

    #[inline(always)]
    fn data_mut(&mut self) -> *mut T {
        std::ptr::addr_of_mut!(self.data) as *mut _
    }

    /// Returns the dynamically sized mutable slice in this HeaderSlice.
    #[inline(always)]
    pub fn slice_mut(&mut self) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut(self.data_mut(), self.len) }
    }

    /// Returns the length of the slice.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.len
    }
}

impl<H, T> Arc<HeaderSlice<H, T>> {
    /// Creates an Arc for a HeaderSlice using the given header struct and
    /// iterator to generate the slice.
    ///
    /// `is_static` indicates whether to create a static Arc.
    ///
    /// `alloc` is used to get a pointer to the memory into which the
    /// dynamically sized ArcInner<HeaderSlice<H, T>> value will be
    /// written. If `is_static` is true, then `alloc` must return a
    /// pointer into some static memory allocation. If it is false,
    /// then `alloc` must return an allocation that can be deallocated
    /// by calling Box::from_raw::<ArcInner<HeaderSlice<H, T>>> on it.
    #[inline]
    pub fn from_header_and_iter_alloc<F, I>(
        alloc: F,
        header: H,
        mut items: I,
        num_items: usize,
        is_static: bool,
    ) -> Self
    where
        F: FnOnce(Layout) -> *mut u8,
        I: Iterator<Item = T>,
    {
        assert_ne!(size_of::<T>(), 0, "Need to think about ZST");

        let layout = Layout::new::<ArcInner<HeaderSlice<H, T>>>();
        debug_assert!(layout.align() >= align_of::<T>());
        debug_assert!(layout.align() >= align_of::<usize>());
        let array_layout = Layout::array::<T>(num_items).expect("Overflow");
        let (layout, _offset) = layout.extend(array_layout).expect("Overflow");
        let p = unsafe {
            // Allocate the buffer.
            let buffer = alloc(layout);
            let mut p = ptr::NonNull::new(buffer)
                .unwrap_or_else(|| alloc::handle_alloc_error(layout))
                .cast::<ArcInner<HeaderSlice<H, T>>>();

            // Write the data.
            //
            // Note that any panics here (i.e. from the iterator) are safe, since
            // we'll just leak the uninitialized memory.
            let count = if is_static {
                atomic::AtomicUsize::new(STATIC_REFCOUNT)
            } else {
                atomic::AtomicUsize::new(1)
            };
            ptr::write(&mut p.as_mut().count, count);
            #[cfg(feature = "track_alloc_size")]
            ptr::write(&mut p.as_mut().alloc_size, layout.size());
            ptr::write(&mut p.as_mut().data.header, header);
            ptr::write(&mut p.as_mut().data.len, num_items);
            if num_items != 0 {
                let mut current = std::ptr::addr_of_mut!(p.as_mut().data.data) as *mut T;
                for _ in 0..num_items {
                    ptr::write(
                        current,
                        items
                            .next()
                            .expect("ExactSizeIterator over-reported length"),
                    );
                    current = current.offset(1);
                }
                // We should have consumed the buffer exactly, maybe accounting
                // for some padding from the alignment.
                debug_assert!(
                    (buffer.add(layout.size()) as usize - current as *mut u8 as usize)
                        < layout.align()
                );
            }
            assert!(
                items.next().is_none(),
                "ExactSizeIterator under-reported length"
            );
            p
        };
        #[cfg(feature = "gecko_refcount_logging")]
        unsafe {
            if !is_static {
                // FIXME(emilio): Would be so amazing to have
                // std::intrinsics::type_name() around.
                NS_LogCtor(p.as_ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8)
            }
        }

        // Return the thin Arc.
        assert_eq!(
            size_of::<Self>(),
            size_of::<usize>(),
            "The Arc should be thin"
        );

        Arc {
            p,
            phantom: PhantomData,
        }
    }

    /// Creates an Arc for a HeaderSlice using the given header struct and iterator to generate the
    /// slice. Panics if num_items doesn't match the number of items.
    #[inline]
    pub fn from_header_and_iter_with_size<I>(header: H, items: I, num_items: usize) -> Self
    where
        I: Iterator<Item = T>,
    {
        Arc::from_header_and_iter_alloc(
            |layout| unsafe { alloc::alloc(layout) },
            header,
            items,
            num_items,
            /* is_static = */ false,
        )
    }

    /// Creates an Arc for a HeaderSlice using the given header struct and
    /// iterator to generate the slice. The resulting Arc is thin (see `ThinArc`).
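    ///
    /// A minimal sketch (ignored, as with the other doctests here):
    ///
    /// ```rust,ignore
    /// # use servo_arc::Arc;
    /// let a = Arc::from_header_and_iter(10u32, vec![1u8, 2, 3].into_iter());
    /// assert_eq!(a.header, 10);
    /// assert_eq!(a.slice(), &[1, 2, 3]);
    /// ```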
    #[inline]
    pub fn from_header_and_iter<I>(header: H, items: I) -> Self
    where
        I: Iterator<Item = T> + ExactSizeIterator,
    {
        let len = items.len();
        Self::from_header_and_iter_with_size(header, items, len)
    }
}

/// This is functionally equivalent to Arc<(H, [T])>.
///
/// When you create an `Arc` containing a dynamically sized type like a slice, the `Arc` is
/// represented on the stack as a "fat pointer", where the length of the slice is stored alongside
/// the `Arc`'s pointer. In some situations you may wish to have a thin pointer instead, perhaps
/// for FFI compatibility or space efficiency. `ThinArc` solves this by storing the length in the
/// allocation itself, via `HeaderSlice`.
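///
/// For instance, a minimal sketch (ignored, as with the other doctests here):
///
/// ```rust,ignore
/// # use servo_arc::ThinArc;
/// # use std::mem::size_of;
/// // A ThinArc is one word on the stack, like a plain pointer.
/// assert_eq!(size_of::<ThinArc<u32, u8>>(), size_of::<usize>());
/// ```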
pub type ThinArc<H, T> = Arc<HeaderSlice<H, T>>;

/// See `ArcUnion`. This is a version that works for `ThinArc`s.
pub type ThinArcUnion<H1, T1, H2, T2> = ArcUnion<HeaderSlice<H1, T1>, HeaderSlice<H2, T2>>;

impl<H, T> UniqueArc<HeaderSlice<H, T>> {
    #[inline]
    pub fn from_header_and_iter<I>(header: H, items: I) -> Self
    where
        I: Iterator<Item = T> + ExactSizeIterator,
    {
        Self(Arc::from_header_and_iter(header, items))
    }

    #[inline]
    pub fn from_header_and_iter_with_size<I>(header: H, items: I, num_items: usize) -> Self
    where
        I: Iterator<Item = T>,
    {
        Self(Arc::from_header_and_iter_with_size(
            header, items, num_items,
        ))
    }

    /// Returns a mutable reference to the header.
    pub fn header_mut(&mut self) -> &mut H {
        // We know this to be uniquely owned.
        unsafe { &mut (*self.0.ptr()).data.header }
    }

    /// Returns a mutable reference to the slice.
    pub fn data_mut(&mut self) -> &mut [T] {
        // We know this to be uniquely owned.
        unsafe { (*self.0.ptr()).data.slice_mut() }
    }
}

/// A "borrowed `Arc`". This is a pointer to
/// a T that is known to have been allocated within an
/// `Arc`.
///
/// This is equivalent in guarantees to `&Arc<T>`, but it is
/// a bit more flexible. To obtain an `&Arc<T>` you must have
/// an `Arc<T>` instance somewhere pinned down until you're done with it.
/// `ArcBorrow` is also a direct pointer to `T`, so using it involves less pointer-chasing.
///
/// However, C++ code may hand us refcounted things as pointers to T directly,
/// so we have to conjure up a temporary `Arc` on the stack each time.
///
/// `ArcBorrow` lets us deal with borrows of known-refcounted objects
/// without needing to worry about where the `Arc<T>` is.
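///
/// A minimal sketch (ignored, as with the other doctests here):
///
/// ```rust,ignore
/// # use servo_arc::Arc;
/// let a = Arc::new(1i32);
/// let b = a.borrow_arc(); // no refcount traffic
/// assert_eq!(*b, 1);
/// let c = b.clone_arc(); // bumps the refcount
/// assert!(Arc::ptr_eq(&a, &c));
/// ```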
#[derive(Debug, Eq, PartialEq)]
pub struct ArcBorrow<'a, T: 'a>(&'a T);

impl<'a, T> Copy for ArcBorrow<'a, T> {}
impl<'a, T> Clone for ArcBorrow<'a, T> {
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}

impl<'a, T> ArcBorrow<'a, T> {
    /// Clone this as an `Arc<T>`. This bumps the refcount.
    #[inline]
    pub fn clone_arc(&self) -> Arc<T> {
        let arc = unsafe { Arc::from_raw(self.0) };
        // addref it!
        mem::forget(arc.clone());
        arc
    }

    /// For constructing from a reference known to be Arc-backed,
    /// e.g. if we obtain such a reference over FFI.
    #[inline]
    pub unsafe fn from_ref(r: &'a T) -> Self {
        ArcBorrow(r)
    }

    /// Compare two `ArcBorrow`s via pointer equality. Will only return
    /// true if they come from the same allocation.
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.0 as *const T == other.0 as *const T
    }

    /// Temporarily converts `self` into a bona fide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
    pub fn with_arc<F, U>(&self, f: F) -> U
    where
        F: FnOnce(&Arc<T>) -> U,
        T: 'static,
    {
        // Synthesize a transient Arc, which never touches the refcount.
        let transient = unsafe { mem::ManuallyDrop::new(Arc::from_raw(self.0)) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forward the result.
        result
    }

    /// Similar to deref, but uses the lifetime `'a` rather than the lifetime of
    /// self, which is incompatible with the signature of the Deref trait.
    #[inline]
    pub fn get(&self) -> &'a T {
        self.0
    }
}

impl<'a, T> Deref for ArcBorrow<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        self.0
    }
}

/// A tagged union that can represent `Arc<A>` or `Arc<B>` while only consuming a
/// single word. The type is also `NonNull`, and thus can be stored in an Option
/// without increasing size.
///
/// This is functionally equivalent to
/// `enum ArcUnion<A, B> { First(Arc<A>), Second(Arc<B>) }` but only takes
/// up a single word of stack space.
///
/// This could probably be extended to support four types if necessary.
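///
/// A minimal sketch (ignored, as with the other doctests here):
///
/// ```rust,ignore
/// # use servo_arc::{Arc, ArcUnion, ArcUnionBorrow};
/// let u: ArcUnion<u32, String> = ArcUnion::from_first(Arc::new(42));
/// assert!(u.is_first());
/// match u.borrow() {
///     ArcUnionBorrow::First(x) => assert_eq!(*x, 42),
///     ArcUnionBorrow::Second(_) => unreachable!(),
/// }
/// ```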
pub struct ArcUnion<A, B> {
    p: ptr::NonNull<()>,
    phantom_a: PhantomData<A>,
    phantom_b: PhantomData<B>,
}

unsafe impl<A: Sync + Send, B: Send + Sync> Send for ArcUnion<A, B> {}
unsafe impl<A: Sync + Send, B: Send + Sync> Sync for ArcUnion<A, B> {}

impl<A: PartialEq, B: PartialEq> PartialEq for ArcUnion<A, B> {
    fn eq(&self, other: &Self) -> bool {
        use crate::ArcUnionBorrow::*;
        match (self.borrow(), other.borrow()) {
            (First(x), First(y)) => x == y,
            (Second(x), Second(y)) => x == y,
            (_, _) => false,
        }
    }
}

impl<A: Eq, B: Eq> Eq for ArcUnion<A, B> {}

/// This represents a borrow of an `ArcUnion`.
#[derive(Debug)]
pub enum ArcUnionBorrow<'a, A: 'a, B: 'a> {
    First(ArcBorrow<'a, A>),
    Second(ArcBorrow<'a, B>),
}

impl<A, B> ArcUnion<A, B> {
    unsafe fn new(ptr: *mut ()) -> Self {
        ArcUnion {
            p: ptr::NonNull::new_unchecked(ptr),
            phantom_a: PhantomData,
            phantom_b: PhantomData,
        }
    }

    /// Returns true if the two values are pointer-equal.
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.p == other.p
    }

    #[inline]
    pub fn ptr(&self) -> ptr::NonNull<()> {
        self.p
    }

    /// Returns an enum representing a borrow of either A or B.
    #[inline]
    pub fn borrow(&self) -> ArcUnionBorrow<A, B> {
        if self.is_first() {
            let ptr = self.p.as_ptr() as *const ArcInner<A>;
            let borrow = unsafe { ArcBorrow::from_ref(&(*ptr).data) };
            ArcUnionBorrow::First(borrow)
        } else {
            let ptr = ((self.p.as_ptr() as usize) & !0x1) as *const ArcInner<B>;
            let borrow = unsafe { ArcBorrow::from_ref(&(*ptr).data) };
            ArcUnionBorrow::Second(borrow)
        }
    }

    /// Creates an `ArcUnion` from an instance of the first type.
    pub fn from_first(other: Arc<A>) -> Self {
        let union = unsafe { Self::new(other.ptr() as *mut _) };
        mem::forget(other);
        union
    }

    /// Creates an `ArcUnion` from an instance of the second type.
    pub fn from_second(other: Arc<B>) -> Self {
        let union = unsafe { Self::new(((other.ptr() as usize) | 0x1) as *mut _) };
        mem::forget(other);
        union
    }

    /// Returns true if this `ArcUnion` contains the first type.
    pub fn is_first(&self) -> bool {
        self.p.as_ptr() as usize & 0x1 == 0
    }

    /// Returns true if this `ArcUnion` contains the second type.
    pub fn is_second(&self) -> bool {
        !self.is_first()
    }

    /// Returns a borrow of the first type if applicable, otherwise `None`.
    pub fn as_first(&self) -> Option<ArcBorrow<A>> {
        match self.borrow() {
            ArcUnionBorrow::First(x) => Some(x),
            ArcUnionBorrow::Second(_) => None,
        }
    }

    /// Returns a borrow of the second type if applicable, otherwise `None`.
    pub fn as_second(&self) -> Option<ArcBorrow<B>> {
        match self.borrow() {
            ArcUnionBorrow::First(_) => None,
            ArcUnionBorrow::Second(x) => Some(x),
        }
    }
}

impl<A, B> Clone for ArcUnion<A, B> {
    fn clone(&self) -> Self {
        match self.borrow() {
            ArcUnionBorrow::First(x) => ArcUnion::from_first(x.clone_arc()),
            ArcUnionBorrow::Second(x) => ArcUnion::from_second(x.clone_arc()),
        }
    }
}

impl<A, B> Drop for ArcUnion<A, B> {
    fn drop(&mut self) {
        match self.borrow() {
            ArcUnionBorrow::First(x) => unsafe {
                let _ = Arc::from_raw(&*x);
            },
            ArcUnionBorrow::Second(x) => unsafe {
                let _ = Arc::from_raw(&*x);
            },
        }
    }
}

impl<A: fmt::Debug, B: fmt::Debug> fmt::Debug for ArcUnion<A, B> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.borrow(), f)
    }
}

#[cfg(test)]
mod tests {
    use super::{Arc, ThinArc};
    use std::clone::Clone;
    use std::ops::Drop;
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};

    #[derive(PartialEq)]
    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                (*self.0).fetch_add(1, SeqCst);
            }
        }
    }

    #[test]
    fn empty_thin() {
        let x = Arc::from_header_and_iter(100u32, std::iter::empty::<i32>());
        assert_eq!(x.header, 100);
        assert!(x.slice().is_empty());
    }

    #[test]
    fn thin_assert_padding() {
        #[derive(Clone, Default)]
        #[repr(C)]
        struct Padded {
            i: u16,
        }

        // The header will have more alignment than `Padded`.
        let items = vec![Padded { i: 0xdead }, Padded { i: 0xbeef }];
        let a = ThinArc::from_header_and_iter(0i32, items.into_iter());
        assert_eq!(a.len(), 2);
        assert_eq!(a.slice()[0].i, 0xdead);
        assert_eq!(a.slice()[1].i, 0xbeef);
    }

    #[test]
    fn slices_and_thin() {
        let mut canary = atomic::AtomicUsize::new(0);
        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
        let v = vec![5, 6];
        {
            let x = Arc::from_header_and_iter(c, v.into_iter());
            let _ = x.clone();
            let _ = x == x;
        }
        assert_eq!(canary.load(Acquire), 1);
    }
}