triomphe/
arc.rs

use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use core::alloc::Layout;
use core::borrow;
use core::cmp::Ordering;
use core::convert::From;
use core::ffi::c_void;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit};
use core::ops::Deref;
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr::{self, NonNull};
use core::sync::atomic;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "stable_deref_trait")]
use stable_deref_trait::{CloneStableDeref, StableDeref};

use crate::{abort, ArcBorrow, HeaderSlice, OffsetArc, UniqueArc};

/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The object allocated by an `Arc<T>`
#[repr(C)]
pub(crate) struct ArcInner<T: ?Sized> {
    pub(crate) count: atomic::AtomicUsize,
    pub(crate) data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T: ?Sized> ArcInner<T> {
    /// Compute the offset of the `data` field within `ArcInner<T>`.
    ///
    /// # Safety
    ///
    /// - The pointer must be created from `Arc::into_raw` or similar functions.
    /// - The pointee must be initialized (`&*value` must not be UB).
    ///   That happens automatically if the pointer comes from `Arc` and the type was not changed.
    ///   This is **not** the case, for example, when the `Arc` was an uninitialized `MaybeUninit<T>`
    ///   and the pointer was cast to `*const T`.
    unsafe fn offset_of_data(value: *const T) -> usize {
        // We can use `Layout::for_value_raw` when it is stable.
        let value = &*value;

        let layout = Layout::new::<atomic::AtomicUsize>();
        let (_, offset) = layout.extend(Layout::for_value(value)).unwrap();
        offset
    }
}

/// An atomically reference counted shared pointer
///
/// See the documentation for [`Arc`] in the standard library. Unlike the
/// standard library `Arc`, this `Arc` does not support weak reference counting.
///
/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
#[repr(transparent)]
pub struct Arc<T: ?Sized> {
    pub(crate) p: ptr::NonNull<ArcInner<T>>,
    pub(crate) phantom: PhantomData<T>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

impl<T: ?Sized + RefUnwindSafe> UnwindSafe for Arc<T> {}

impl<T> Arc<T> {
    /// Construct an `Arc<T>`
    #[inline]
    pub fn new(data: T) -> Self {
        let ptr = Box::into_raw(Box::new(ArcInner {
            count: atomic::AtomicUsize::new(1),
            data,
        }));

        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(ptr),
                phantom: PhantomData,
            }
        }
    }

    /// Temporarily converts `self` into a bona fide `OffsetArc` and exposes it to the
    /// provided callback. The refcount is not modified.
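    ///
    /// A minimal usage sketch (relying on `OffsetArc` dereferencing to `T`,
    /// with hypothetical values):
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(7);
    /// let doubled = a.with_raw_offset_arc(|offset| **offset * 2);
    /// assert_eq!(doubled, 14);
    /// ```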
    #[inline(always)]
    pub fn with_raw_offset_arc<F, U>(&self, f: F) -> U
    where
        F: FnOnce(&OffsetArc<T>) -> U,
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        // Store transient in `ManuallyDrop`, to leave the refcount untouched.
        let transient = unsafe { ManuallyDrop::new(Arc::into_raw_offset(ptr::read(self))) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        f(&transient)
    }

    /// Converts an `Arc` into an `OffsetArc`. This consumes the `Arc`, so the refcount
    /// is not modified.
    #[inline]
    pub fn into_raw_offset(a: Self) -> OffsetArc<T> {
        unsafe {
            OffsetArc {
                ptr: ptr::NonNull::new_unchecked(Arc::into_raw(a) as *mut T),
                phantom: PhantomData,
            }
        }
    }

    /// Converts an `OffsetArc` into an `Arc`. This consumes the `OffsetArc`, so the refcount
    /// is not modified.
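    ///
    /// A round-trip sketch through both conversions:
    ///
    /// ```
    /// use triomphe::{Arc, OffsetArc};
    ///
    /// let a = Arc::new(1);
    /// let offset: OffsetArc<i32> = Arc::into_raw_offset(a);
    /// let a: Arc<i32> = Arc::from_raw_offset(offset);
    /// assert_eq!(*a, 1);
    /// ```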
    #[inline]
    pub fn from_raw_offset(a: OffsetArc<T>) -> Self {
        let a = ManuallyDrop::new(a);
        let ptr = a.ptr.as_ptr();
        unsafe { Arc::from_raw(ptr) }
    }

    /// Returns the inner value, if the [`Arc`] has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was
    /// passed in.
    ///
    /// # Examples
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        Self::try_unique(this).map(UniqueArc::into_inner)
    }

    /// Converts the `Arc` to a `UniqueArc` if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, `None` is returned and the `Arc` is dropped.
    ///
    /// If `Arc::into_unique` is called on every clone of this `Arc`, it is guaranteed that exactly one of the calls
    /// returns a `UniqueArc`. This means in particular that the inner data is not dropped. This can be useful when
    /// it is desirable to recover the inner value in a way that does not require coordination amongst the various
    /// copies of `Arc`.
    ///
    /// `Arc::try_unique` is conceptually similar to `Arc::into_unique`, but it is meant for different use-cases. If
    /// used as a direct replacement for `Arc::into_unique`, such as with the expression `Arc::try_unique(this).ok()`,
    /// then it does not give the same guarantee as described in the previous paragraph.
    ///
    /// For more information, see the examples below and read the documentation of `Arc::try_unique`.
    ///
    /// # Examples
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_unique` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_unique(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_unique(y));
    ///
    /// let x_unique = x_thread.join().unwrap();
    /// let y_unique = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!((x_unique.is_some() && y_unique.is_none()) || (x_unique.is_none() && y_unique.is_some()));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unique(x).ok()` and `Arc::try_unique(y).ok()` instead.
    /// ```
    pub fn into_unique(this: Self) -> Option<UniqueArc<T>> {
        // Prevent ourselves from being dropped, to avoid clashing with the existing drop logic.
        let this = ManuallyDrop::new(this);

        // Update the reference count by decrementing by one, and if we are the last holder of this `Arc` (previous
        // value was one), then we know we can now reconstitute this `Arc` into a `UniqueArc`. Otherwise, there's
        // nothing else for us to do and we return `None` to signal that we weren't the last holder.
        //
        // Unlike `drop_inner`, we use AcqRel ordering on `fetch_sub` (instead of `fetch_sub(Release)` followed by
        // `load(Acquire)`) to end up with the same outcome, just with a single atomic operation instead. This _is_
        // strictly stronger (in terms of synchronization) but is not materially different: we're simply ensuring that
        // any subsequent mutation of the data through `UniqueArc` cannot be ordered _before_ the reference count is
        // updated, which could allow other threads to see the data in an inconsistent state, ultimately leading to
        // a data race.
        if this.inner().count.fetch_sub(1, AcqRel) != 1 {
            return None;
        }

        // Update the reference count _back_ to one, which upholds the reference count invariant of `UniqueArc`.
        //
        // Since we know we are the only thread accessing this `Arc` at this point, we have no special ordering needs.
        this.inner().count.store(1, Relaxed);

        // SAFETY: The reference count is guaranteed to be one at this point.
        Some(unsafe { UniqueArc::from_arc(ManuallyDrop::into_inner(this)) })
    }
}

impl<T> Arc<[T]> {
    /// Reconstruct the `Arc<[T]>` from a raw pointer obtained from `into_raw()`.
    ///
    /// [`Arc::from_raw`] should accept unsized types, but this is not trivial to do correctly
    /// until the feature [`pointer_bytes_offsets`](https://github.com/rust-lang/rust/issues/96283)
    /// is stabilized. This is a stopgap solution for slices.
    ///
    /// # Safety
    /// - The given pointer must be a valid pointer to `[T]` that came from [`Arc::into_raw`].
    /// - After `from_raw_slice`, the pointer must not be accessed.
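    ///
    /// A round-trip sketch, mirroring the `roundtrip_slice` test below:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a: Arc<[u32]> = Arc::from(vec![1, 2, 3]);
    /// let ptr: *const [u32] = Arc::into_raw(a);
    /// // SAFETY: `ptr` came from `Arc::into_raw` and is not used afterwards.
    /// let a = unsafe { Arc::from_raw_slice(ptr) };
    /// assert_eq!(*a, [1, 2, 3]);
    /// ```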
    pub unsafe fn from_raw_slice(ptr: *const [T]) -> Self {
        Arc::from_raw(ptr)
    }
}

impl<T: ?Sized> Arc<T> {
    /// Convert the `Arc<T>` to a raw pointer, suitable for use across FFI.
    ///
    /// Note: This returns a pointer to the data `T`, which is offset in the allocation.
    ///
    /// It is recommended to use `OffsetArc` for this.
    #[inline]
    pub fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        this.as_ptr()
    }

    /// Reconstruct the `Arc<T>` from a raw pointer obtained from `into_raw()`.
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    ///
    /// It is recommended to use `OffsetArc` for this.
    ///
    /// # Safety
    /// - The given pointer must be a valid pointer to `T` that came from [`Arc::into_raw`].
    /// - After `from_raw`, the pointer must not be accessed.
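    ///
    /// A round-trip sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(42);
    /// let ptr = Arc::into_raw(a);
    /// // SAFETY: `ptr` came from `Arc::into_raw` and is not used afterwards.
    /// let a = unsafe { Arc::from_raw(ptr) };
    /// assert_eq!(*a, 42);
    /// ```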
    #[inline]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.

        // SAFETY: `ptr` comes from `ArcInner.data`, so it must be initialized.
        let offset_of_data = ArcInner::<T>::offset_of_data(ptr);

        // SAFETY: `from_raw_inner` expects a pointer to the beginning of the allocation,
        //   not a pointer to the data part.
        //   `ptr` points to `ArcInner.data`, so the subtraction results
        //   in the beginning of the `ArcInner`, which is the beginning of the allocation.
        let arc_inner_ptr = ptr.byte_sub(offset_of_data);
        Arc::from_raw_inner(arc_inner_ptr as *mut ArcInner<T>)
    }

    /// Returns the raw pointer.
    ///
    /// Same as `into_raw`, except `self` isn't consumed.
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        // SAFETY: This cannot go through a reference to `data`, because this method
        // is used to implement `into_raw`. To reconstruct the full `Arc` from this
        // pointer, it needs to maintain its full provenance, and not be reduced to
        // just the contained `T`.
        unsafe { ptr::addr_of_mut!((*self.ptr()).data) }
    }

    /// Produce a pointer to the data that can be converted back
    /// to an `Arc`. This is basically an `&Arc<T>`, without the extra indirection.
    /// It has the benefits of an `&T` but also knows about the underlying refcount
    /// and can be converted into more `Arc<T>`s if necessary.
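    ///
    /// A minimal sketch (relying on `ArcBorrow` dereferencing to `T`):
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(5);
    /// let borrowed = a.borrow_arc();
    /// assert_eq!(*borrowed, 5);
    /// ```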
    #[inline]
    pub fn borrow_arc(&self) -> ArcBorrow<'_, T> {
        unsafe { ArcBorrow(NonNull::new_unchecked(self.as_ptr() as *mut T), PhantomData) }
    }

    /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
    /// reporting.
    pub fn heap_ptr(&self) -> *const c_void {
        self.p.as_ptr() as *const ArcInner<T> as *const c_void
    }

    /// The reference count of this `Arc`.
    ///
    /// The number does not include borrowed pointers,
    /// or temporary `Arc` pointers created with functions like
    /// [`ArcBorrow::with_arc`].
    ///
    /// The function is called `strong_count` to mirror `std::sync::Arc::strong_count`;
    /// however, `triomphe::Arc` does not support weak references.
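    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(1);
    /// let b = Arc::clone(&a);
    /// assert_eq!(Arc::strong_count(&a), 2);
    /// drop(b);
    /// assert_eq!(Arc::strong_count(&a), 1);
    /// ```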
    #[inline]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().count.load(Relaxed)
    }

    #[inline]
    pub(super) fn into_raw_inner(this: Self) -> *mut ArcInner<T> {
        let this = ManuallyDrop::new(this);
        this.ptr()
    }

    /// Construct an `Arc` from an allocated `ArcInner`.
    /// # Safety
    /// The `ptr` must point to a valid instance, allocated by an `Arc`. The reference
    /// count will not be modified.
    pub(super) unsafe fn from_raw_inner(ptr: *mut ArcInner<T>) -> Self {
        Arc {
            p: ptr::NonNull::new_unchecked(ptr),
            phantom: PhantomData,
        }
    }

    #[inline]
    pub(super) fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let _ = Box::from_raw(self.ptr());
    }

    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
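    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(3);
    /// let b = Arc::clone(&a);
    /// assert!(Arc::ptr_eq(&a, &b));
    /// assert!(!Arc::ptr_eq(&a, &Arc::new(3)));
    /// ```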
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        ptr::addr_eq(this.ptr(), other.ptr())
    }

    pub(crate) fn ptr(&self) -> *mut ArcInner<T> {
        self.p.as_ptr()
    }

    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value, where the value has the layout provided.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
    ///
    /// ## Safety
    ///
    /// `mem_to_arcinner` must return the same pointer; the only things that can change are
    /// - its type
    /// - its metadata
    ///
    /// `value_layout` must be correct for `T`.
    #[allow(unused_unsafe)]
    pub(super) unsafe fn allocate_for_layout(
        value_layout: Layout,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> NonNull<ArcInner<T>> {
        let layout = Layout::new::<ArcInner<()>>()
            .extend(value_layout)
            .unwrap()
            .0
            .pad_to_align();

        // Safety: we propagate safety requirements to the caller.
        unsafe {
            Arc::try_allocate_for_layout(value_layout, mem_to_arcinner)
                .unwrap_or_else(|_| handle_alloc_error(layout))
        }
    }

    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value, where the value has the layout provided,
    /// returning an error if allocation fails.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
    ///
    /// ## Safety
    ///
    /// `mem_to_arcinner` must return the same pointer; the only things that can change are
    /// - its type
    /// - its metadata
    ///
    /// `value_layout` must be correct for `T`.
    #[allow(unused_unsafe)]
    unsafe fn try_allocate_for_layout(
        value_layout: Layout,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> Result<NonNull<ArcInner<T>>, ()> {
        let layout = Layout::new::<ArcInner<()>>()
            .extend(value_layout)
            .unwrap()
            .0
            .pad_to_align();

        let ptr = NonNull::new(alloc::alloc::alloc(layout)).ok_or(())?;

        // Initialize the ArcInner
        let inner = mem_to_arcinner(ptr.as_ptr());
        debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);

        unsafe {
            ptr::write(&mut (*inner).count, atomic::AtomicUsize::new(1));
        }

        // Safety: `ptr` is checked to be non-null,
        //         `inner` is the same as `ptr` (per the safety requirements of this function)
        unsafe { Ok(NonNull::new_unchecked(inner)) }
    }
}

impl<H, T> Arc<HeaderSlice<H, [T]>> {
    pub(super) fn allocate_for_header_and_slice(
        len: usize,
    ) -> NonNull<ArcInner<HeaderSlice<H, [T]>>> {
        let layout = Layout::new::<H>()
            .extend(Layout::array::<T>(len).unwrap())
            .unwrap()
            .0
            .pad_to_align();

        unsafe {
            // Safety:
            // - the provided closure does not change the pointer (except for meta & type)
            // - the provided layout is valid for `HeaderSlice<H, [T]>`
            Arc::allocate_for_layout(layout, |mem| {
                // Synthesize the fat pointer. We do this by claiming we have a direct
                // pointer to a [T], and then changing the type of the borrow. The key
                // point here is that the length portion of the fat pointer applies
                // only to the number of elements in the dynamically-sized portion of
                // the type, so the value will be the same whether it points to a [T]
                // or something else with a [T] as its last member.
                let fake_slice = ptr::slice_from_raw_parts_mut(mem as *mut T, len);
                fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
            })
        }
    }
}

impl<T> Arc<MaybeUninit<T>> {
    /// Create an `Arc` that contains a `MaybeUninit<T>`.
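    ///
    /// A minimal init-then-share sketch, going through `get_mut` while the
    /// `Arc` is still unique (writing through a shared `Arc` is deprecated):
    ///
    /// ```
    /// use core::mem::MaybeUninit;
    /// use triomphe::Arc;
    ///
    /// let mut a: Arc<MaybeUninit<u32>> = Arc::new_uninit();
    /// Arc::get_mut(&mut a).unwrap().write(123);
    /// // SAFETY: the value was just initialized above.
    /// let a: Arc<u32> = unsafe { a.assume_init() };
    /// assert_eq!(*a, 123);
    /// ```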
    pub fn new_uninit() -> Self {
        Arc::new(MaybeUninit::<T>::uninit())
    }

    /// Calls `MaybeUninit::write` on the value contained.
    ///
    /// ## Panics
    ///
    /// If the `Arc` is not unique.
    #[deprecated(
        since = "0.1.7",
        note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc::write` instead."
    )]
    #[track_caller]
    pub fn write(&mut self, val: T) -> &mut T {
        UniqueArc::write(must_be_unique(self), val)
    }

    /// Obtain a mutable pointer to the stored `MaybeUninit<T>`.
    pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit<T> {
        unsafe { &mut (*self.ptr()).data }
    }

    /// # Safety
    ///
    /// Must initialize all fields before calling this function.
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T> {
        Arc::from_raw_inner(ManuallyDrop::new(self).ptr().cast())
    }
}

impl<T> Arc<[MaybeUninit<T>]> {
    /// Create an `Arc` that contains a slice `[MaybeUninit<T>]` of length `len`.
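    ///
    /// A minimal sketch that initializes every element before sharing:
    ///
    /// ```
    /// use core::mem::MaybeUninit;
    /// use triomphe::Arc;
    ///
    /// let mut a: Arc<[MaybeUninit<u32>]> = Arc::new_uninit_slice(3);
    /// for (i, slot) in Arc::get_mut(&mut a).unwrap().iter_mut().enumerate() {
    ///     slot.write(i as u32);
    /// }
    /// // SAFETY: every element was initialized above.
    /// let a: Arc<[u32]> = unsafe { a.assume_init() };
    /// assert_eq!(*a, [0, 1, 2]);
    /// ```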
    pub fn new_uninit_slice(len: usize) -> Self {
        UniqueArc::new_uninit_slice(len).shareable()
    }

    /// Obtain a mutable slice to the stored `[MaybeUninit<T>]`.
    #[deprecated(
        since = "0.1.8",
        note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc` or `get_mut` instead."
    )]
    #[track_caller]
    pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
        must_be_unique(self)
    }

    /// # Safety
    ///
    /// Must initialize all fields before calling this function.
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T]> {
        Arc::from_raw_inner(ManuallyDrop::new(self).ptr() as _)
    }
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            abort();
        }

        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(self.ptr()),
                phantom: PhantomData,
            }
        }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference to the contents of the `Arc`, cloning if necessary.
    ///
    /// This is functionally equivalent to [`Arc::make_mut`][mm] from the standard library.
    ///
    /// If this `Arc` is uniquely owned, `make_mut()` will provide a mutable
    /// reference to the contents. If not, `make_mut()` will create a _new_ `Arc`
    /// with a copy of the contents, update `this` to point to it, and provide
    /// a mutable reference to its contents.
    ///
    /// This is useful for implementing copy-on-write schemes where you wish to
    /// avoid copying things if your `Arc` is not shared.
    ///
    /// [mm]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#method.make_mut
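    ///
    /// A copy-on-write sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let mut a = Arc::new(5);
    /// let b = Arc::clone(&a);
    /// *Arc::make_mut(&mut a) += 1; // clones, because `b` still shares the old value
    /// assert_eq!(*a, 6);
    /// assert_eq!(*b, 5);
    /// ```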
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new(T::clone(this));
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }

    /// Makes a `UniqueArc` from an `Arc`, cloning if necessary.
    ///
    /// If this `Arc` is uniquely owned, `make_unique()` will provide a `UniqueArc`
    /// containing `this`. If not, `make_unique()` will create a _new_ `Arc`
    /// with a copy of the contents, update `this` to point to it, and provide
    /// a `UniqueArc` to it.
    ///
    /// This is useful for implementing copy-on-write schemes where you wish to
    /// avoid copying things if your `Arc` is not shared.
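    ///
    /// A minimal sketch (relying on `UniqueArc` dereferencing mutably to `T`):
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let mut a = Arc::new(5);
    /// **Arc::make_unique(&mut a) += 1; // already unique, so nothing is cloned
    /// assert_eq!(*a, 6);
    /// ```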
    #[inline]
    pub fn make_unique(this: &mut Self) -> &mut UniqueArc<T> {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new(T::clone(this));
        }

        unsafe {
            // Safety: this is either unique or just created (which is also unique)
            UniqueArc::from_arc_ref(this)
        }
    }

    /// If we have the only reference to `T`, then unwrap it. Otherwise, clone `T` and return the clone.
    ///
    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
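    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(String::from("hello"));
    /// let b = Arc::clone(&a);
    /// // `a` is still shared with `b`, so the inner value is cloned out...
    /// let s = Arc::unwrap_or_clone(a);
    /// // ...while `b` is unique by now, so its value is moved out without cloning.
    /// let t = Arc::unwrap_or_clone(b);
    /// assert_eq!(s, t);
    /// ```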
    pub fn unwrap_or_clone(this: Arc<T>) -> T {
        Self::try_unwrap(this).unwrap_or_else(|this| T::clone(&this))
    }
}

impl<T: ?Sized> Arc<T> {
    /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
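    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let mut a = Arc::new(1);
    /// *Arc::get_mut(&mut a).unwrap() = 2;
    /// assert_eq!(*a, 2);
    ///
    /// let b = Arc::clone(&a);
    /// assert!(Arc::get_mut(&mut a).is_none()); // no longer unique
    /// ```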
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    /// Provides unique access to the arc _if_ the `Arc` is uniquely owned.
    pub fn get_unique(this: &mut Self) -> Option<&mut UniqueArc<T>> {
        Self::try_as_unique(this).ok()
    }

    /// Whether or not the `Arc` is uniquely owned (is the refcount 1?).
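    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let a = Arc::new(0);
    /// assert!(a.is_unique());
    /// let b = Arc::clone(&a);
    /// assert!(!a.is_unique());
    /// drop(b);
    /// assert!(a.is_unique());
    /// ```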
    pub fn is_unique(&self) -> bool {
        // See the extensive discussion in [1] for why this needs to be Acquire.
        //
        // [1] https://github.com/servo/servo/issues/21186
        Self::count(self) == 1
    }

    /// Gets the number of [`Arc`] pointers to this allocation
    pub fn count(this: &Self) -> usize {
        this.inner().count.load(Acquire)
    }

    /// Returns a [`UniqueArc`] if the [`Arc`] has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was
    /// passed in.
    ///
    /// # Examples
    ///
    /// ```
    /// use triomphe::{Arc, UniqueArc};
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(UniqueArc::into_inner(Arc::try_unique(x).unwrap()), 3);
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(
    ///     *Arc::try_unique(x).map(UniqueArc::into_inner).unwrap_err(),
    ///     4,
    /// );
    /// ```
    pub fn try_unique(this: Self) -> Result<UniqueArc<T>, Self> {
        if this.is_unique() {
            // Safety: The current arc is unique and making a `UniqueArc`
            //         from it is sound
            unsafe { Ok(UniqueArc::from_arc(this)) }
        } else {
            Err(this)
        }
    }

    pub(crate) fn try_as_unique(this: &mut Self) -> Result<&mut UniqueArc<T>, &mut Self> {
        if this.is_unique() {
            // Safety: The current arc is unique and making a `UniqueArc`
            //         from it is sound
            unsafe { Ok(UniqueArc::from_arc_ref(this)) }
        } else {
            Err(this)
        }
    }

    fn drop_inner(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happen before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

#[cfg(not(feature = "unstable_dropck_eyepatch"))]
impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        self.drop_inner();
    }
}

// SAFETY: We do not access the inner `T`, so we are fine to drop an `Arc` whose `T` has already been dropped.
#[cfg(feature = "unstable_dropck_eyepatch")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        self.drop_inner();
    }
}

impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        // TODO: pointer equality is incorrect if `T` is not `Eq`.
        Self::ptr_eq(self, other) || *(*self) == *(*other)
    }

    #[allow(clippy::partialeq_ne_impl)]
    fn ne(&self, other: &Arc<T>) -> bool {
        !Self::ptr_eq(self, other) && *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: Default> Default for Arc<T> {
    #[inline]
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    #[inline]
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

impl<A> FromIterator<A> for Arc<[A]> {
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
        UniqueArc::from_iter(iter).shareable()
    }
}

impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    #[inline]
    fn borrow(&self) -> &T {
        self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        self
    }
}

#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> StableDeref for Arc<T> {}
#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> CloneStableDeref for Arc<T> {}

#[cfg(feature = "serde")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> {
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

#[cfg(feature = "serde")]
impl<T: Serialize> Serialize for Arc<T> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}

// Safety:
// This implementation must guarantee that it is sound to call replace_ptr with an unsized variant
// of the pointer returned in `as_sized_ptr`. The basic property of Unsize coercion is that safety
// invariants and layout are unaffected. The Arc does not rely on any other property of T. This makes
// any unsized ArcInner valid for being shared with the sized variant.
// This does _not_ mean that any T can be unsized into a U, but rather that if such unsizing is
// possible then it can be propagated into the Arc<T>.
#[cfg(feature = "unsize")]
unsafe impl<T, U: ?Sized> unsize::CoerciblePtr<U> for Arc<T> {
    type Pointee = T;
    type Output = Arc<U>;

    fn as_sized_ptr(&mut self) -> *mut T {
        // Returns a pointer to the complete inner. The unsizing itself won't care about the
        // pointer value and promises not to offset it.
        self.p.as_ptr() as *mut T
    }

    unsafe fn replace_ptr(self, new: *mut U) -> Arc<U> {
        // Fix the provenance by ensuring that the pointer of `self` is used.
        let inner = ManuallyDrop::new(self);
        let p = inner.p.as_ptr() as *mut T;
        // Safety: This points to an ArcInner of the previous self and holds shared ownership since
        // the old pointer never decremented the reference count. The caller upholds that `new` is
        // an unsized version of the previous ArcInner. This assumes that unsizing to the fat
        // pointer tag of an `ArcInner<U>` and `U` is isomorphic under a direct pointer cast since
        // in reality we unsized *mut T to *mut U at the address of the ArcInner. This is the case
        // for all currently envisioned unsized types where the tag of T and ArcInner<T> are simply
        // the same.
        Arc::from_raw_inner(p.replace_ptr(new) as *mut ArcInner<U>)
    }
}

#[track_caller]
fn must_be_unique<T: ?Sized>(arc: &mut Arc<T>) -> &mut UniqueArc<T> {
    match Arc::try_as_unique(arc) {
        Ok(unique) => unique,
        Err(this) => panic!("`Arc` must be unique in order for this operation to be safe, there are currently {} copies", Arc::count(this)),
    }
}

#[cfg(test)]
mod tests {
    use crate::arc::Arc;
    use alloc::borrow::ToOwned;
    use alloc::string::String;
    use alloc::vec::Vec;
    use core::iter::FromIterator;
    use core::mem::MaybeUninit;
    #[cfg(feature = "unsize")]
    use unsize::{CoerceUnsize, Coercion};

    #[test]
    fn try_unwrap() {
        let x = Arc::new(100usize);
        let y = x.clone();

        // The count should be two so `try_unwrap()` should fail
        assert_eq!(Arc::count(&x), 2);
        assert!(Arc::try_unwrap(x).is_err());

        // Since `x` has now been dropped, the count should be 1
        // and `try_unwrap()` should succeed
        assert_eq!(Arc::count(&y), 1);
        assert_eq!(Arc::try_unwrap(y), Ok(100));
    }

    #[test]
    #[cfg(feature = "unsize")]
    fn coerce_to_slice() {
        let x = Arc::new([0u8; 4]);
        let y: Arc<[u8]> = x.clone().unsize(Coercion::to_slice());
        assert_eq!((*x).as_ptr(), (*y).as_ptr());
    }

    #[test]
    #[cfg(feature = "unsize")]
    fn coerce_to_dyn() {
        let x: Arc<_> = Arc::new(|| 42u32);
        let x: Arc<_> = x.unsize(Coercion::<_, dyn Fn() -> u32>::to_fn());
        assert_eq!((*x)(), 42);
    }

    #[test]
    #[allow(deprecated)]
    fn maybeuninit() {
        let mut arc: Arc<MaybeUninit<_>> = Arc::new_uninit();
        arc.write(999);

        let arc = unsafe { arc.assume_init() };
        assert_eq!(*arc, 999);
    }

    #[test]
    #[allow(deprecated)]
    #[should_panic = "`Arc` must be unique in order for this operation to be safe"]
    fn maybeuninit_ub_to_proceed() {
        let mut uninit = Arc::new_uninit();
        let clone = uninit.clone();

        let x: &MaybeUninit<String> = &clone;

        // This write invalidates the `x` reference
        uninit.write(String::from("nonononono"));

        // Read the invalidated reference to trigger UB
        let _read = &*x;
    }

    #[test]
    #[allow(deprecated)]
    #[should_panic = "`Arc` must be unique in order for this operation to be safe"]
    fn maybeuninit_slice_ub_to_proceed() {
        let mut uninit = Arc::new_uninit_slice(13);
        let clone = uninit.clone();

        let x: &[MaybeUninit<String>] = &clone;

        // This write invalidates the `x` reference
        uninit.as_mut_slice()[0].write(String::from("nonononono"));

        // Read the invalidated reference to trigger UB
        let _read = &*x;
    }

    #[test]
    fn maybeuninit_array() {
        let mut arc: Arc<[MaybeUninit<_>]> = Arc::new_uninit_slice(5);
        assert!(arc.is_unique());
        #[allow(deprecated)]
        for (uninit, index) in arc.as_mut_slice().iter_mut().zip(0..5) {
            let ptr = uninit.as_mut_ptr();
            unsafe { core::ptr::write(ptr, index) };
        }

        let arc = unsafe { arc.assume_init() };
        assert!(arc.is_unique());
        // Using clone to check that the layout generated in new_uninit_slice is compatible
        // with ArcInner.
        let arcs = [
            arc.clone(),
            arc.clone(),
            arc.clone(),
            arc.clone(),
            arc.clone(),
        ];
        assert_eq!(6, Arc::count(&arc));
        // If the layout is not compatible, then the data might be corrupted.
        assert_eq!(*arc, [0, 1, 2, 3, 4]);

        // Drop the arcs and check the count and the content to
        // make sure it isn't corrupted.
        drop(arcs);
        assert!(arc.is_unique());
        assert_eq!(*arc, [0, 1, 2, 3, 4]);
    }

    #[test]
    fn roundtrip() {
        let arc: Arc<usize> = Arc::new(0usize);
        let ptr = Arc::into_raw(arc);
        unsafe {
            let _arc = Arc::from_raw(ptr);
        }
    }

    #[test]
    fn from_iterator_exact_size() {
        let arc = Arc::from_iter(Vec::from_iter(["ololo".to_owned(), "trololo".to_owned()]));
        assert_eq!(1, Arc::count(&arc));
        assert_eq!(["ololo".to_owned(), "trololo".to_owned()], *arc);
    }

    #[test]
    fn from_iterator_unknown_size() {
        let arc = Arc::from_iter(
            Vec::from_iter(["ololo".to_owned(), "trololo".to_owned()])
                .into_iter()
                // Filter is opaque to iterators, so the resulting iterator
                // will report a lower bound of 0.
                .filter(|_| true),
        );
        assert_eq!(1, Arc::count(&arc));
        assert_eq!(["ololo".to_owned(), "trololo".to_owned()], *arc);
    }

    #[test]
    fn roundtrip_slice() {
        let arc = Arc::from(Vec::from_iter([17, 19]));
        let ptr = Arc::into_raw(arc);
        let arc = unsafe { Arc::from_raw_slice(ptr) };
        assert_eq!([17, 19], *arc);
        assert_eq!(1, Arc::count(&arc));
    }

    #[test]
    fn arc_eq_and_cmp() {
        [
            [("*", &b"AB"[..]), ("*", &b"ab"[..])],
            [("*", &b"AB"[..]), ("*", &b"a"[..])],
            [("*", &b"A"[..]), ("*", &b"ab"[..])],
            [("A", &b"*"[..]), ("a", &b"*"[..])],
            [("a", &b"*"[..]), ("A", &b"*"[..])],
            [("AB", &b"*"[..]), ("a", &b"*"[..])],
            [("A", &b"*"[..]), ("ab", &b"*"[..])],
        ]
        .iter()
        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
            let l = Arc::from_header_and_slice(lh, ls);
            let r = Arc::from_header_and_slice(rh, rs);

            assert_eq!(l, l);
            assert_eq!(r, r);

            assert_ne!(l, r);
            assert_ne!(r, l);

            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");

            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");

            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");

            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");

            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
        })
    }

    #[test]
    fn arc_eq_and_partial_cmp() {
        [
            [(0.0, &[0.0, 0.0][..]), (1.0, &[0.0, 0.0][..])],
            [(1.0, &[0.0, 0.0][..]), (0.0, &[0.0, 0.0][..])],
            [(0.0, &[0.0][..]), (0.0, &[0.0, 0.0][..])],
            [(0.0, &[0.0, 0.0][..]), (0.0, &[0.0][..])],
            [(0.0, &[1.0, 2.0][..]), (0.0, &[10.0, 20.0][..])],
        ]
        .iter()
        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
            let l = Arc::from_header_and_slice(lh, ls);
            let r = Arc::from_header_and_slice(rh, rs);

            assert_eq!(l, l);
            assert_eq!(r, r);

            assert_ne!(l, r);
            assert_ne!(r, l);

            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");

            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");

            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");

            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");

            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
        })
    }

    #[test]
    fn test_strong_count() {
        let arc = Arc::new(17);
        assert_eq!(1, Arc::strong_count(&arc));
        let arc2 = arc.clone();
        assert_eq!(2, Arc::strong_count(&arc));
        drop(arc);
        assert_eq!(1, Arc::strong_count(&arc2));
    }

    #[test]
    fn test_partial_eq_bug() {
        let float = f32::NAN;
        assert_ne!(float, float);
        let arc = Arc::new(f32::NAN);
        // TODO: this is a bug.
        assert_eq!(arc, arc);
    }

    #[test]
    fn test_into_raw_from_raw_dst() {
        trait AnInteger {
            fn get_me_an_integer(&self) -> u64;
        }

        impl AnInteger for u32 {
            fn get_me_an_integer(&self) -> u64 {
                *self as u64
            }
        }

        let arc = Arc::<u32>::new(19);
        let data = Arc::into_raw(arc);
        let data: *const dyn AnInteger = data as *const _;
        let arc: Arc<dyn AnInteger> = unsafe { Arc::from_raw(data) };
        assert_eq!(19, arc.get_me_an_integer());
    }

    #[test]
    fn into_unique() {
        let arc = Arc::new(42);
        assert_eq!(1, Arc::count(&arc));

        let arc2 = Arc::clone(&arc);

        assert_eq!(2, Arc::count(&arc));

        let arc2_unique = Arc::into_unique(arc2);
        assert!(arc2_unique.is_none());
        assert_eq!(1, Arc::count(&arc));

        let arc_unique = Arc::into_unique(arc).unwrap();
        assert_eq!(42, *arc_unique);
    }

    #[cfg(feature = "std")]
    #[test]
    fn into_unique_data_race_no_sleep() {
        // Exists to be exercised by Miri to check for data races.
        let a = Arc::new(0);
        let b = a.clone();
        std::thread::spawn(move || {
            let _value = *b;
        });
        std::thread::spawn(move || {
            *Arc::into_unique(a).unwrap() += 1;
        });
    }

    #[cfg(feature = "std")]
    #[test]
    fn into_unique_data_race_sleep() {
        // Exists to be exercised by Miri to check for data races.
        let a = Arc::new(0);
        let b = a.clone();
        let t1 = std::thread::spawn(move || {
            let _value = *b;
        });
        let t2 = std::thread::spawn(move || {
            std::thread::sleep(std::time::Duration::from_millis(100));
            if let Some(mut u) = Arc::into_unique(a) {
                *u += 1
            }
        });
        t1.join().unwrap();
        t2.join().unwrap();
    }

    #[allow(dead_code)]
    const fn is_partial_ord<T: ?Sized + PartialOrd>() {}

    #[allow(dead_code)]
    const fn is_ord<T: ?Sized + Ord>() {}

    // compile-time check that PartialOrd/Ord are correctly implemented
    const _: () = is_partial_ord::<Arc<f64>>();
    const _: () = is_ord::<Arc<u64>>();
}