// triomphe/arc.rs
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use core::alloc::Layout;
use core::borrow;
use core::cmp::Ordering;
use core::convert::From;
use core::ffi::c_void;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit};
use core::ops::Deref;
use core::ptr::{self, NonNull};
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::{isize, usize};

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "stable_deref_trait")]
use stable_deref_trait::{CloneStableDeref, StableDeref};

use crate::{abort, ArcBorrow, HeaderSlice, OffsetArc, UniqueArc};

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The object allocated by an `Arc<T>`
#[repr(C)]
pub(crate) struct ArcInner<T: ?Sized> {
    pub(crate) count: atomic::AtomicUsize,
    pub(crate) data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T: ?Sized> ArcInner<T> {
    /// Compute the offset of the `data` field within `ArcInner<T>`.
    ///
    /// # Safety
    ///
    /// - The pointer must be created from `Arc::into_raw` or similar functions
    /// - The pointee must be initialized (`&*value` must not be UB).
    ///   That happens automatically if the pointer comes from `Arc` and the type was not changed.
    ///   This is **not** the case, for example, when the `Arc` held an uninitialized `MaybeUninit<T>`
    ///   and the pointer was cast to `*const T`.
    unsafe fn offset_of_data(value: *const T) -> usize {
        // We can use `Layout::for_value_raw` when it is stable.
        let value = &*value;

        let layout = Layout::new::<atomic::AtomicUsize>();
        let (_, offset) = layout.extend(Layout::for_value(value)).unwrap();
        offset
    }
}

/// An atomically reference counted shared pointer
///
/// See the documentation for [`Arc`] in the standard library. Unlike the
/// standard library `Arc`, this `Arc` does not support weak reference counting.
///
/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
#[repr(transparent)]
pub struct Arc<T: ?Sized> {
    pub(crate) p: ptr::NonNull<ArcInner<T>>,
    pub(crate) phantom: PhantomData<T>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

impl<T> Arc<T> {
    /// Construct an `Arc<T>`
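    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let five = Arc::new(5);
    /// assert_eq!(*five, 5);
    /// ```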
    #[inline]
    pub fn new(data: T) -> Self {
        let ptr = Box::into_raw(Box::new(ArcInner {
            count: atomic::AtomicUsize::new(1),
            data,
        }));

        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(ptr),
                phantom: PhantomData,
            }
        }
    }

    /// Temporarily converts `self` into a bona fide `OffsetArc` and exposes it to the
    /// provided callback. The refcount is not modified.
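    ///
    /// A minimal sketch, assuming (as elsewhere in this crate) that `OffsetArc<T>`
    /// dereferences to `T`:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(7);
    /// let doubled = x.with_raw_offset_arc(|offset| **offset * 2);
    /// assert_eq!(doubled, 14);
    /// ```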
    #[inline(always)]
    pub fn with_raw_offset_arc<F, U>(&self, f: F) -> U
    where
        F: FnOnce(&OffsetArc<T>) -> U,
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        // Store transient in `ManuallyDrop`, to leave the refcount untouched.
        let transient = unsafe { ManuallyDrop::new(Arc::into_raw_offset(ptr::read(self))) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        f(&transient)
    }

    /// Converts an `Arc` into an `OffsetArc`. This consumes the `Arc`, so the refcount
    /// is not modified.
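    ///
    /// A round-trip sketch through [`Arc::from_raw_offset`]:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let arc = Arc::new(42);
    /// let offset = Arc::into_raw_offset(arc);
    /// let arc = Arc::from_raw_offset(offset);
    /// assert_eq!(*arc, 42);
    /// ```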
    #[inline]
    pub fn into_raw_offset(a: Self) -> OffsetArc<T> {
        unsafe {
            OffsetArc {
                ptr: ptr::NonNull::new_unchecked(Arc::into_raw(a) as *mut T),
                phantom: PhantomData,
            }
        }
    }

    /// Converts an `OffsetArc` into an `Arc`. This consumes the `OffsetArc`, so the refcount
    /// is not modified.
    #[inline]
    pub fn from_raw_offset(a: OffsetArc<T>) -> Self {
        let a = ManuallyDrop::new(a);
        let ptr = a.ptr.as_ptr();
        unsafe { Arc::from_raw(ptr) }
    }

    /// Returns the inner value, if the [`Arc`] has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was
    /// passed in.
    ///
    /// # Examples
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        Self::try_unique(this).map(UniqueArc::into_inner)
    }
}

impl<T> Arc<[T]> {
    /// Reconstruct the `Arc<[T]>` from a raw pointer obtained from `into_raw()`.
    ///
    /// [`Arc::from_raw`] should accept unsized types, but this is not trivial to do correctly
    /// until the feature [`pointer_bytes_offsets`](https://github.com/rust-lang/rust/issues/96283)
    /// is stabilized. This is a stopgap solution for slices.
    ///
    /// # Safety
    /// - The given pointer must be a valid pointer to `[T]` that came from [`Arc::into_raw`].
    /// - After `from_raw_slice`, the pointer must not be accessed.
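    ///
    /// A round-trip sketch, mirroring the `roundtrip_slice` test below:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let arc: Arc<[i32]> = Arc::from(vec![17, 19]);
    /// let ptr = Arc::into_raw(arc);
    /// // SAFETY: `ptr` came from `Arc::into_raw` and is not used again afterwards.
    /// let arc = unsafe { Arc::from_raw_slice(ptr) };
    /// assert_eq!([17, 19], *arc);
    /// ```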
    pub unsafe fn from_raw_slice(ptr: *const [T]) -> Self {
        Arc::from_raw(ptr)
    }
}

impl<T: ?Sized> Arc<T> {
    /// Convert the `Arc<T>` to a raw pointer, suitable for use across FFI
    ///
    /// Note: This returns a pointer to the data T, which is offset in the allocation.
    ///
    /// It is recommended to use `OffsetArc` for this.
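    ///
    /// A round-trip sketch, mirroring the `roundtrip` test below:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let arc = Arc::new(17u32);
    /// let ptr = Arc::into_raw(arc);
    /// // SAFETY: `ptr` came from `Arc::into_raw` and is not used again afterwards.
    /// let arc = unsafe { Arc::from_raw(ptr) };
    /// assert_eq!(*arc, 17);
    /// ```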
    #[inline]
    pub fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        this.as_ptr()
    }

    /// Reconstruct the `Arc<T>` from a raw pointer obtained from `into_raw()`
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    ///
    /// It is recommended to use `OffsetArc` for this
    ///
    /// # Safety
    /// - The given pointer must be a valid pointer to `T` that came from [`Arc::into_raw`].
    /// - After `from_raw`, the pointer must not be accessed.
    #[inline]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.

        // SAFETY: `ptr` comes from `ArcInner.data`, so it must be initialized.
        let offset_of_data = ArcInner::<T>::offset_of_data(ptr);

        // SAFETY: `from_raw_inner` expects a pointer to the beginning of the allocation,
        // not a pointer to the data part.
        // `ptr` points to `ArcInner.data`, so the subtraction results
        // in the beginning of the `ArcInner`, which is the beginning of the allocation.
        let arc_inner_ptr = ptr.byte_sub(offset_of_data);
        Arc::from_raw_inner(arc_inner_ptr as *mut ArcInner<T>)
    }

    /// Returns the raw pointer.
    ///
    /// Same as `into_raw` except `self` isn't consumed.
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        // SAFETY: This cannot go through a reference to `data`, because this method
        // is used to implement `into_raw`. To reconstruct the full `Arc` from this
        // pointer, it needs to maintain its full provenance, and not be reduced to
        // just the contained `T`.
        unsafe { ptr::addr_of_mut!((*self.ptr()).data) }
    }

    /// Produce a pointer to the data that can be converted back
    /// to an `Arc`. This is basically an `&Arc<T>`, without the extra indirection.
    /// It has the benefits of an `&T` but also knows about the underlying refcount
    /// and can be converted into more `Arc<T>`s if necessary.
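    ///
    /// A minimal sketch, assuming (as elsewhere in this crate) that `ArcBorrow<'_, T>`
    /// dereferences to `T`:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(7);
    /// let borrowed = x.borrow_arc();
    /// assert_eq!(*borrowed, 7);
    /// ```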
    #[inline]
    pub fn borrow_arc(&self) -> ArcBorrow<'_, T> {
        unsafe { ArcBorrow(NonNull::new_unchecked(self.as_ptr() as *mut T), PhantomData) }
    }

    /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
    /// reporting.
    pub fn heap_ptr(&self) -> *const c_void {
        self.p.as_ptr() as *const ArcInner<T> as *const c_void
    }

    /// The reference count of this `Arc`.
    ///
    /// The number does not include borrowed pointers,
    /// or temporary `Arc` pointers created with functions like
    /// [`ArcBorrow::with_arc`].
    ///
    /// The function is called `strong_count` to mirror `std::sync::Arc::strong_count`;
    /// however, `triomphe::Arc` does not support weak references.
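    ///
    /// A minimal sketch, mirroring the `test_strong_count` test below:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(1);
    /// assert_eq!(Arc::strong_count(&x), 1);
    /// let _y = x.clone();
    /// assert_eq!(Arc::strong_count(&x), 2);
    /// ```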
    #[inline]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().count.load(Relaxed)
    }

    #[inline]
    pub(super) fn into_raw_inner(this: Self) -> *mut ArcInner<T> {
        let this = ManuallyDrop::new(this);
        this.ptr()
    }

    /// Construct an `Arc` from an allocated `ArcInner`.
    /// # Safety
    /// The `ptr` must point to a valid instance, allocated by an `Arc`. The reference count will
    /// not be modified.
    pub(super) unsafe fn from_raw_inner(ptr: *mut ArcInner<T>) -> Self {
        Arc {
            p: ptr::NonNull::new_unchecked(ptr),
            phantom: PhantomData,
        }
    }

    #[inline]
    pub(super) fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let _ = Box::from_raw(self.ptr());
    }

    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
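    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(5);
    /// let same = x.clone();
    /// let other = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&x, &same));
    /// assert!(!Arc::ptr_eq(&x, &other));
    /// ```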
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        ptr::addr_eq(this.ptr(), other.ptr())
    }

    pub(crate) fn ptr(&self) -> *mut ArcInner<T> {
        self.p.as_ptr()
    }

    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value where the value has the layout provided.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
    ///
    /// ## Safety
    ///
    /// `mem_to_arcinner` must return the same pointer; the only things that may change are
    /// - its type
    /// - its metadata
    ///
    /// `value_layout` must be correct for `T`.
    #[allow(unused_unsafe)]
    pub(super) unsafe fn allocate_for_layout(
        value_layout: Layout,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> NonNull<ArcInner<T>> {
        let layout = Layout::new::<ArcInner<()>>()
            .extend(value_layout)
            .unwrap()
            .0
            .pad_to_align();

        // Safety: we propagate safety requirements to the caller
        unsafe {
            Arc::try_allocate_for_layout(value_layout, mem_to_arcinner)
                .unwrap_or_else(|_| handle_alloc_error(layout))
        }
    }

    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value where the value has the layout provided,
    /// returning an error if allocation fails.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
    ///
    /// ## Safety
    ///
    /// `mem_to_arcinner` must return the same pointer; the only things that may change are
    /// - its type
    /// - its metadata
    ///
    /// `value_layout` must be correct for `T`.
    #[allow(unused_unsafe)]
    unsafe fn try_allocate_for_layout(
        value_layout: Layout,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> Result<NonNull<ArcInner<T>>, ()> {
        let layout = Layout::new::<ArcInner<()>>()
            .extend(value_layout)
            .unwrap()
            .0
            .pad_to_align();

        let ptr = NonNull::new(alloc::alloc::alloc(layout)).ok_or(())?;

        // Initialize the ArcInner
        let inner = mem_to_arcinner(ptr.as_ptr());
        debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);

        unsafe {
            ptr::write(&mut (*inner).count, atomic::AtomicUsize::new(1));
        }

        // Safety: `ptr` is checked to be non-null,
        // `inner` is the same as `ptr` (per the safety requirements of this function)
        unsafe { Ok(NonNull::new_unchecked(inner)) }
    }
}

impl<H, T> Arc<HeaderSlice<H, [T]>> {
    pub(super) fn allocate_for_header_and_slice(
        len: usize,
    ) -> NonNull<ArcInner<HeaderSlice<H, [T]>>> {
        let layout = Layout::new::<H>()
            .extend(Layout::array::<T>(len).unwrap())
            .unwrap()
            .0
            .pad_to_align();

        unsafe {
            // Safety:
            // - the provided closure does not change the pointer (except for meta & type)
            // - the provided layout is valid for `HeaderSlice<H, [T]>`
            Arc::allocate_for_layout(layout, |mem| {
                // Synthesize the fat pointer. We do this by claiming we have a direct
                // pointer to a [T], and then changing the type of the borrow. The key
                // point here is that the length portion of the fat pointer applies
                // only to the number of elements in the dynamically-sized portion of
                // the type, so the value will be the same whether it points to a [T]
                // or something else with a [T] as its last member.
                let fake_slice = ptr::slice_from_raw_parts_mut(mem as *mut T, len);
                fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
            })
        }
    }
}

impl<T> Arc<MaybeUninit<T>> {
    /// Create an `Arc` containing a `MaybeUninit<T>`.
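    ///
    /// A minimal sketch, initializing through [`UniqueArc::write`] (the replacement
    /// the deprecation note on [`Arc::write`] recommends):
    ///
    /// ```
    /// use triomphe::{Arc, UniqueArc};
    ///
    /// let mut arc = Arc::<u32>::new_uninit();
    /// // A freshly created `Arc` is unique, so `get_unique` succeeds.
    /// UniqueArc::write(Arc::get_unique(&mut arc).unwrap(), 9);
    /// // SAFETY: the value was just initialized.
    /// let arc = unsafe { arc.assume_init() };
    /// assert_eq!(*arc, 9);
    /// ```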
    pub fn new_uninit() -> Self {
        Arc::new(MaybeUninit::<T>::uninit())
    }

    /// Calls `MaybeUninit::write` on the value contained.
    ///
    /// ## Panics
    ///
    /// If the `Arc` is not unique.
    #[deprecated(
        since = "0.1.7",
        note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc::write` instead."
    )]
    #[track_caller]
    pub fn write(&mut self, val: T) -> &mut T {
        UniqueArc::write(must_be_unique(self), val)
    }

    /// Obtain a mutable pointer to the stored `MaybeUninit<T>`.
    pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit<T> {
        unsafe { &mut (*self.ptr()).data }
    }

    /// # Safety
    ///
    /// Must initialize all fields before calling this function.
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T> {
        Arc::from_raw_inner(ManuallyDrop::new(self).ptr().cast())
    }
}

impl<T> Arc<[MaybeUninit<T>]> {
    /// Create an `Arc` containing a slice `[MaybeUninit<T>]` of length `len`.
    pub fn new_uninit_slice(len: usize) -> Self {
        UniqueArc::new_uninit_slice(len).shareable()
    }

    /// Obtain a mutable slice to the stored `[MaybeUninit<T>]`.
    #[deprecated(
        since = "0.1.8",
        note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc` or `get_mut` instead."
    )]
    #[track_caller]
    pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
        must_be_unique(self)
    }

    /// # Safety
    ///
    /// Must initialize all fields before calling this function.
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T]> {
        Arc::from_raw_inner(ManuallyDrop::new(self).ptr() as _)
    }
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            abort();
        }

        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(self.ptr()),
                phantom: PhantomData,
            }
        }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference to the `Arc`, cloning if necessary
    ///
    /// This is functionally equivalent to [`Arc::make_mut`][mm] from the standard library.
    ///
    /// If this `Arc` is uniquely owned, `make_mut()` will provide a mutable
    /// reference to the contents. If not, `make_mut()` will create a _new_ `Arc`
    /// with a copy of the contents, update `this` to point to it, and provide
    /// a mutable reference to its contents.
    ///
    /// This is useful for implementing copy-on-write schemes where you wish to
    /// avoid copying things if your `Arc` is not shared.
    ///
    /// [mm]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#method.make_mut
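    ///
    /// A minimal copy-on-write sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let mut data = Arc::new(5);
    /// *Arc::make_mut(&mut data) += 1; // `data` is unique; no clone happens.
    ///
    /// let other = data.clone();
    /// *Arc::make_mut(&mut data) += 1; // `data` is shared; the contents are cloned first.
    ///
    /// assert_eq!(*data, 7);
    /// assert_eq!(*other, 6);
    /// ```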
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new(T::clone(this));
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }

    /// Makes a `UniqueArc` from an `Arc`, cloning if necessary.
    ///
    /// If this `Arc` is uniquely owned, `make_unique()` will provide a `UniqueArc`
    /// containing `this`. If not, `make_unique()` will create a _new_ `Arc`
    /// with a copy of the contents, update `this` to point to it, and provide
    /// a `UniqueArc` to it.
    ///
    /// This is useful for implementing copy-on-write schemes where you wish to
    /// avoid copying things if your `Arc` is not shared.
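    ///
    /// A minimal sketch, assuming (as elsewhere in this crate) that `UniqueArc<T>`
    /// dereferences mutably to `T`:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let mut x = Arc::new(5);
    /// let shared = x.clone();
    /// // `x` is shared, so the contents are cloned before we get unique access.
    /// **Arc::make_unique(&mut x) += 1;
    /// assert_eq!(*x, 6);
    /// assert_eq!(*shared, 5);
    /// ```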
    #[inline]
    pub fn make_unique(this: &mut Self) -> &mut UniqueArc<T> {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new(T::clone(this));
        }

        unsafe {
            // Safety: this is either unique or just created (which is also unique)
            UniqueArc::from_arc_ref(this)
        }
    }

    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the clone.
    ///
    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
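    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let unique = Arc::new(String::from("hello"));
    /// // No other references: the `String` is moved out without cloning.
    /// assert_eq!(Arc::unwrap_or_clone(unique), "hello");
    ///
    /// let shared = Arc::new(String::from("world"));
    /// let keep = shared.clone();
    /// // `keep` still points to the allocation, so the `String` is cloned.
    /// assert_eq!(Arc::unwrap_or_clone(shared), "world");
    /// assert_eq!(*keep, "world");
    /// ```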
    pub fn unwrap_or_clone(this: Arc<T>) -> T {
        Self::try_unwrap(this).unwrap_or_else(|this| T::clone(&this))
    }
}

impl<T: ?Sized> Arc<T> {
    /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
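    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = x.clone();
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```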
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    /// Provides unique access to the arc _if_ the `Arc` is uniquely owned.
    pub fn get_unique(this: &mut Self) -> Option<&mut UniqueArc<T>> {
        Self::try_as_unique(this).ok()
    }

    /// Whether or not the `Arc` is uniquely owned (is the refcount 1?).
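    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use triomphe::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert!(x.is_unique());
    /// let _y = x.clone();
    /// assert!(!x.is_unique());
    /// ```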
    pub fn is_unique(&self) -> bool {
        // See the extensive discussion in [1] for why this needs to be Acquire.
        //
        // [1]: https://github.com/servo/servo/issues/21186
        Self::count(self) == 1
    }

    /// Gets the number of [`Arc`] pointers to this allocation
    pub fn count(this: &Self) -> usize {
        this.inner().count.load(Acquire)
    }

    /// Returns a [`UniqueArc`] if the [`Arc`] has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was
    /// passed in.
    ///
    /// # Examples
    ///
    /// ```
    /// use triomphe::{Arc, UniqueArc};
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(UniqueArc::into_inner(Arc::try_unique(x).unwrap()), 3);
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(
    ///     *Arc::try_unique(x).map(UniqueArc::into_inner).unwrap_err(),
    ///     4,
    /// );
    /// ```
    pub fn try_unique(this: Self) -> Result<UniqueArc<T>, Self> {
        if this.is_unique() {
            // Safety: The current arc is unique and making a `UniqueArc`
            // from it is sound
            unsafe { Ok(UniqueArc::from_arc(this)) }
        } else {
            Err(this)
        }
    }

    pub(crate) fn try_as_unique(this: &mut Self) -> Result<&mut UniqueArc<T>, &mut Self> {
        if this.is_unique() {
            // Safety: The current arc is unique and making a `UniqueArc`
            // from it is sound
            unsafe { Ok(UniqueArc::from_arc_ref(this)) }
        } else {
            Err(this)
        }
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        // TODO: pointer equality is incorrect if `T` is not `Eq`.
        Self::ptr_eq(self, other) || *(*self) == *(*other)
    }

    #[allow(clippy::partialeq_ne_impl)]
    fn ne(&self, other: &Arc<T>) -> bool {
        !Self::ptr_eq(self, other) && *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: Default> Default for Arc<T> {
    #[inline]
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    #[inline]
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

impl<A> FromIterator<A> for Arc<[A]> {
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
        UniqueArc::from_iter(iter).shareable()
    }
}

impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    #[inline]
    fn borrow(&self) -> &T {
        self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        self
    }
}

#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> StableDeref for Arc<T> {}
#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> CloneStableDeref for Arc<T> {}

#[cfg(feature = "serde")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> {
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

#[cfg(feature = "serde")]
impl<T: Serialize> Serialize for Arc<T> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}

// Safety:
// This implementation must guarantee that it is sound to call replace_ptr with an unsized variant
// of the pointer returned in `as_sized_ptr`. The basic property of Unsize coercion is that safety
// invariants and layout are unaffected. The Arc does not rely on any other property of T. This
// makes any unsized ArcInner valid for being shared with the sized variant.
// This does _not_ mean that any T can be unsized into a U, but rather that if such unsizing is
// possible then it can be propagated into the Arc<T>.
#[cfg(feature = "unsize")]
unsafe impl<T, U: ?Sized> unsize::CoerciblePtr<U> for Arc<T> {
    type Pointee = T;
    type Output = Arc<U>;

    fn as_sized_ptr(&mut self) -> *mut T {
        // Returns a pointer to the complete inner. The unsizing itself won't care about the
        // pointer value and promises not to offset it.
        self.p.as_ptr() as *mut T
    }

    unsafe fn replace_ptr(self, new: *mut U) -> Arc<U> {
        // Fix the provenance by ensuring that the provenance of `self` is used.
        let inner = ManuallyDrop::new(self);
        let p = inner.p.as_ptr() as *mut T;
        // Safety: This points to an ArcInner of the previous self and holds shared ownership since
        // the old pointer never decremented the reference count. The caller upholds that `new` is
        // an unsized version of the previous ArcInner. This assumes that unsizing to the fat
        // pointer tag of an `ArcInner<U>` and `U` is isomorphic under a direct pointer cast since
        // in reality we unsized *mut T to *mut U at the address of the ArcInner. This is the case
        // for all currently envisioned unsized types where the tag of T and ArcInner<T> are simply
        // the same.
        Arc::from_raw_inner(p.replace_ptr(new) as *mut ArcInner<U>)
    }
}

#[track_caller]
fn must_be_unique<T: ?Sized>(arc: &mut Arc<T>) -> &mut UniqueArc<T> {
    match Arc::try_as_unique(arc) {
        Ok(unique) => unique,
        Err(this) => panic!("`Arc` must be unique in order for this operation to be safe, there are currently {} copies", Arc::count(this)),
    }
}

#[cfg(test)]
mod tests {
    use crate::arc::Arc;
    use alloc::borrow::ToOwned;
    use alloc::string::String;
    use alloc::vec::Vec;
    use core::iter::FromIterator;
    use core::mem::MaybeUninit;
    #[cfg(feature = "unsize")]
    use unsize::{CoerceUnsize, Coercion};

    #[test]
    fn try_unwrap() {
        let x = Arc::new(100usize);
        let y = x.clone();

        // The count should be two so `try_unwrap()` should fail
        assert_eq!(Arc::count(&x), 2);
        assert!(Arc::try_unwrap(x).is_err());

        // Since `x` has now been dropped, the count should be 1
        // and `try_unwrap()` should succeed
        assert_eq!(Arc::count(&y), 1);
        assert_eq!(Arc::try_unwrap(y), Ok(100));
    }

    #[test]
    #[cfg(feature = "unsize")]
    fn coerce_to_slice() {
        let x = Arc::new([0u8; 4]);
        let y: Arc<[u8]> = x.clone().unsize(Coercion::to_slice());
        assert_eq!((*x).as_ptr(), (*y).as_ptr());
    }

    #[test]
    #[cfg(feature = "unsize")]
    fn coerce_to_dyn() {
        let x: Arc<_> = Arc::new(|| 42u32);
        let x: Arc<_> = x.unsize(Coercion::<_, dyn Fn() -> u32>::to_fn());
        assert_eq!((*x)(), 42);
    }

    #[test]
    #[allow(deprecated)]
    fn maybeuninit() {
        let mut arc: Arc<MaybeUninit<_>> = Arc::new_uninit();
        arc.write(999);

        let arc = unsafe { arc.assume_init() };
        assert_eq!(*arc, 999);
    }

    #[test]
    #[allow(deprecated)]
    #[should_panic = "`Arc` must be unique in order for this operation to be safe"]
    fn maybeuninit_ub_to_proceed() {
        let mut uninit = Arc::new_uninit();
        let clone = uninit.clone();

        let x: &MaybeUninit<String> = &*clone;

        // This write invalidates the `x` reference
        uninit.write(String::from("nonononono"));

        // Read the invalidated reference to trigger UB
        let _ = &*x;
    }

    #[test]
    #[allow(deprecated)]
    #[should_panic = "`Arc` must be unique in order for this operation to be safe"]
    fn maybeuninit_slice_ub_to_proceed() {
        let mut uninit = Arc::new_uninit_slice(13);
        let clone = uninit.clone();

        let x: &[MaybeUninit<String>] = &*clone;

        // This write invalidates the `x` reference
        uninit.as_mut_slice()[0].write(String::from("nonononono"));

        // Read the invalidated reference to trigger UB
        let _ = &*x;
    }

    #[test]
    fn maybeuninit_array() {
        let mut arc: Arc<[MaybeUninit<_>]> = Arc::new_uninit_slice(5);
        assert!(arc.is_unique());
        #[allow(deprecated)]
        for (uninit, index) in arc.as_mut_slice().iter_mut().zip(0..5) {
            let ptr = uninit.as_mut_ptr();
            unsafe { core::ptr::write(ptr, index) };
        }

        let arc = unsafe { arc.assume_init() };
        assert!(arc.is_unique());
        // Use clone to check that the layout generated in new_uninit_slice is compatible
        // with ArcInner.
        let arcs = [
            arc.clone(),
            arc.clone(),
            arc.clone(),
            arc.clone(),
            arc.clone(),
        ];
        assert_eq!(6, Arc::count(&arc));
        // If the layout is not compatible, then the data might be corrupted.
        assert_eq!(*arc, [0, 1, 2, 3, 4]);

        // Drop the arcs and check the count and the content to
        // make sure it isn't corrupted.
        drop(arcs);
        assert!(arc.is_unique());
        assert_eq!(*arc, [0, 1, 2, 3, 4]);
    }

    #[test]
    fn roundtrip() {
        let arc: Arc<usize> = Arc::new(0usize);
        let ptr = Arc::into_raw(arc);
        unsafe {
            let _arc = Arc::from_raw(ptr);
        }
    }

    #[test]
    fn from_iterator_exact_size() {
        let arc = Arc::from_iter(Vec::from_iter(["ololo".to_owned(), "trololo".to_owned()]));
        assert_eq!(1, Arc::count(&arc));
        assert_eq!(["ololo".to_owned(), "trololo".to_owned()], *arc);
    }

    #[test]
    fn from_iterator_unknown_size() {
        let arc = Arc::from_iter(
            Vec::from_iter(["ololo".to_owned(), "trololo".to_owned()])
                .into_iter()
                // Filter is opaque to iterators, so the resulting iterator
                // will report a lower bound of 0.
                .filter(|_| true),
        );
        assert_eq!(1, Arc::count(&arc));
        assert_eq!(["ololo".to_owned(), "trololo".to_owned()], *arc);
    }

    #[test]
    fn roundtrip_slice() {
        let arc = Arc::from(Vec::from_iter([17, 19]));
        let ptr = Arc::into_raw(arc);
        let arc = unsafe { Arc::from_raw_slice(ptr) };
        assert_eq!([17, 19], *arc);
        assert_eq!(1, Arc::count(&arc));
    }

    #[test]
    fn arc_eq_and_cmp() {
        [
            [("*", &b"AB"[..]), ("*", &b"ab"[..])],
            [("*", &b"AB"[..]), ("*", &b"a"[..])],
            [("*", &b"A"[..]), ("*", &b"ab"[..])],
            [("A", &b"*"[..]), ("a", &b"*"[..])],
            [("a", &b"*"[..]), ("A", &b"*"[..])],
            [("AB", &b"*"[..]), ("a", &b"*"[..])],
            [("A", &b"*"[..]), ("ab", &b"*"[..])],
        ]
        .iter()
        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
            let l = Arc::from_header_and_slice(lh, ls);
            let r = Arc::from_header_and_slice(rh, rs);

            assert_eq!(l, l);
            assert_eq!(r, r);

            assert_ne!(l, r);
            assert_ne!(r, l);

            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");

            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");

            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");

            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");

            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
        })
    }

    #[test]
    fn arc_eq_and_partial_cmp() {
        [
            [(0.0, &[0.0, 0.0][..]), (1.0, &[0.0, 0.0][..])],
            [(1.0, &[0.0, 0.0][..]), (0.0, &[0.0, 0.0][..])],
            [(0.0, &[0.0][..]), (0.0, &[0.0, 0.0][..])],
            [(0.0, &[0.0, 0.0][..]), (0.0, &[0.0][..])],
            [(0.0, &[1.0, 2.0][..]), (0.0, &[10.0, 20.0][..])],
        ]
        .iter()
        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
            let l = Arc::from_header_and_slice(lh, ls);
            let r = Arc::from_header_and_slice(rh, rs);

            assert_eq!(l, l);
            assert_eq!(r, r);

            assert_ne!(l, r);
            assert_ne!(r, l);

            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");

            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");

            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");

            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");

            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
        })
    }

    #[test]
    fn test_strong_count() {
        let arc = Arc::new(17);
        assert_eq!(1, Arc::strong_count(&arc));
        let arc2 = arc.clone();
        assert_eq!(2, Arc::strong_count(&arc));
        drop(arc);
        assert_eq!(1, Arc::strong_count(&arc2));
    }

    #[test]
    fn test_partial_eq_bug() {
        let float = f32::NAN;
        assert_ne!(float, float);
        let arc = Arc::new(f32::NAN);
        // TODO: this is a bug.
        assert_eq!(arc, arc);
    }

    #[test]
    fn test_into_raw_from_raw_dst() {
        trait AnInteger {
            fn get_me_an_integer(&self) -> u64;
        }

        impl AnInteger for u32 {
            fn get_me_an_integer(&self) -> u64 {
                *self as u64
            }
        }

        let arc = Arc::<u32>::new(19);
        let data = Arc::into_raw(arc);
        let data: *const dyn AnInteger = data as *const _;
        let arc: Arc<dyn AnInteger> = unsafe { Arc::from_raw(data) };
        assert_eq!(19, arc.get_me_an_integer());
    }

    #[allow(dead_code)]
    const fn is_partial_ord<T: ?Sized + PartialOrd>() {}

    #[allow(dead_code)]
    const fn is_ord<T: ?Sized + Ord>() {}

    // compile-time check that PartialOrd/Ord is correctly derived
    const _: () = is_partial_ord::<Arc<f64>>();
    const _: () = is_ord::<Arc<u64>>();
}