zerocopy/
split_at.rs

1// Copyright 2025 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10use super::*;
11use crate::pointer::invariant::{Aligned, Exclusive, Invariants, Shared, Valid};
12
/// Types that can be split in two.
///
/// This trait generalizes Rust's existing support for splitting slices to
/// support slices and slice-based dynamically-sized types ("slice DSTs").
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(SplitAt)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::{SplitAt, KnownLayout};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct<T: ?Sized> {
/// # /*
///     ...,
/// # */
///     // `SplitAt` types must have at least one field.
///     field: T,
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `SplitAt`.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
///
/// You must not rely on the `#[doc(hidden)]` internals of `SplitAt`. Future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::SplitAt")]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.SplitAt.html"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(SplitAt)]` to `{Self}`")
)]
// # Safety
//
// The trailing slice is well-aligned for its element type. `Self` is `[T]`, or
// a `repr(C)` or `repr(transparent)` slice DST.
pub unsafe trait SplitAt: KnownLayout<PointerMetadata = usize> {
    /// The element type of the trailing slice.
    type Elem;

    // This `#[doc(hidden)]` item discourages (by convention) impls of
    // `SplitAt` outside of the derive macro and this module; external code
    // is not expected to name it.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Unsafely splits `self` in two.
    ///
    /// # Safety
    ///
    /// The caller promises that `l_len` is not greater than the length of
    /// `self`'s trailing slice.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "split_at_unchecked",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[inline]
    #[must_use]
    unsafe fn split_at_unchecked(&self, l_len: usize) -> Split<&Self> {
        // SAFETY: By precondition on the caller, `l_len <= self.len()`.
        unsafe { Split::<&Self>::new(self, l_len) }
    }

    /// Attempts to split `self` in two.
    ///
    /// Returns `None` if `l_len` is greater than the length of `self`'s
    /// trailing slice.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `Immutable` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_immutable();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "split_at",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[inline]
    #[must_use = "has no side effects"]
    fn split_at(&self, l_len: usize) -> Option<Split<&Self>> {
        MetadataOf::new_in_bounds(self, l_len).map(
            // `inline(always)`: the closure body is trivial; fuse the bounds
            // check and the `Split` construction at the call site.
            #[inline(always)]
            |l_len| {
                // SAFETY: We have ensured that `l_len <= self.len()` (by
                // post-condition on `MetadataOf::new_in_bounds`)
                unsafe { Split::new(self, l_len.get()) }
            },
        )
    }

    /// Unsafely splits `self` in two.
    ///
    /// # Safety
    ///
    /// The caller promises that `l_len` is not greater than the length of
    /// `self`'s trailing slice.
    ///
    #[doc = codegen_header!("h5", "split_at_mut_unchecked")]
    ///
    /// See [`SplitAt::split_at_unchecked`](#method.split_at_unchecked.codegen).
    #[inline]
    #[must_use]
    unsafe fn split_at_mut_unchecked(&mut self, l_len: usize) -> Split<&mut Self> {
        // SAFETY: By precondition on the caller, `l_len <= self.len()`.
        unsafe { Split::<&mut Self>::new(self, l_len) }
    }

    /// Attempts to split `self` in two.
    ///
    /// Returns `None` if `l_len` is greater than the length of `self`'s
    /// trailing slice, or if the given `l_len` would result in [the trailing
    /// padding](KnownLayout#slice-dst-layout) of the left portion overlapping
    /// the right portion.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
    ///     // return concurrent references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_into_bytes();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    ///
    #[doc = codegen_header!("h5", "split_at_mut")]
    ///
    /// See [`SplitAt::split_at`](#method.split_at.codegen).
    #[inline]
    fn split_at_mut(&mut self, l_len: usize) -> Option<Split<&mut Self>> {
        MetadataOf::new_in_bounds(self, l_len).map(
            // `inline(always)`: see `split_at` above.
            #[inline(always)]
            |l_len| {
                // SAFETY: We have ensured that `l_len <= self.len()` (by
                // post-condition on `MetadataOf::new_in_bounds`)
                unsafe { Split::new(self, l_len.get()) }
            },
        )
    }
}
249
// SAFETY: `[T]`'s trailing slice is `[T]`, which is trivially aligned.
unsafe impl<T> SplitAt for [T] {
    type Elem = T;

    #[inline]
    // `dead_code`: this marker method is never called; it exists only to
    // restrict `SplitAt` implementations to the derive macro (and this impl).
    #[allow(dead_code)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }
}
262
/// A `T` that has been split into two possibly-overlapping parts.
///
/// For some dynamically sized types, the padding that appears after the
/// trailing slice field [is a dynamic function of the trailing slice
/// length](KnownLayout#slice-dst-layout). If `T` is split at a length that
/// requires trailing padding, the trailing padding of the left part of the
/// split `T` will overlap the right part. If `T` is a mutable reference or
/// permits interior mutation, you must ensure that the left and right parts do
/// not overlap. You can do this at zero-cost using
/// [`Self::via_immutable`], [`Self::via_into_bytes`], or
/// [`Self::via_unaligned`], or with a dynamic check by using
/// [`Self::via_runtime_check`].
#[derive(Debug)]
pub struct Split<T> {
    /// A pointer to the source slice DST.
    source: T,
    /// The length of the future left half of `source`.
    ///
    /// # Safety
    ///
    /// If `source` is a pointer to a slice DST, `l_len` is no greater than
    /// `source`'s length.
    l_len: usize,
}
287
288impl<T> Split<T> {
289    /// Produces a `Split` of `source` with `l_len`.
290    ///
291    /// # Safety
292    ///
293    /// `l_len` is no greater than `source`'s length.
294    #[inline(always)]
295    unsafe fn new(source: T, l_len: usize) -> Self {
296        Self { source, l_len }
297    }
298}
299
impl<'a, T> Split<&'a T>
where
    T: ?Sized + SplitAt,
{
    /// Converts this split of a shared reference into a split of [`Ptr`], on
    /// which the aliasing-aware splitting logic is implemented.
    #[inline(always)]
    fn into_ptr(self) -> Split<Ptr<'a, T, (Shared, Aligned, Valid)>> {
        let source = Ptr::from_ref(self.source);
        // SAFETY: `Ptr::from_ref(self.source)` points to exactly `self.source`
        // and thus maintains the invariants of `self` with respect to `l_len`.
        unsafe { Split::new(source, self.l_len) }
    }

    /// Produces the split parts of `self`, using [`Immutable`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `Immutable` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_immutable();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "split_via_immutable",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_immutable(self) -> (&'a T, &'a [T::Elem])
    where
        T: Immutable,
    {
        let (l, r) = self.into_ptr().via_immutable();
        (l.as_ref(), r.as_ref())
    }

    /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_into_bytes();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_header!("h5", "split_via_into_bytes")]
    ///
    /// See [`Split::via_immutable`](#method.split_via_immutable.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_into_bytes(self) -> (&'a T, &'a [T::Elem])
    where
        T: IntoBytes,
    {
        let (l, r) = self.into_ptr().via_into_bytes();
        (l.as_ref(), r.as_ref())
    }

    /// Produces the split parts of `self`, using [`Unaligned`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Unaligned)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `Unaligned` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_unaligned();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_header!("h5", "split_via_unaligned")]
    ///
    /// See [`Split::via_immutable`](#method.split_via_immutable.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_unaligned(self) -> (&'a T, &'a [T::Elem])
    where
        T: Unaligned,
    {
        let (l, r) = self.into_ptr().via_unaligned();
        (l.as_ref(), r.as_ref())
    }

    /// Produces the split parts of `self`, using a dynamic check to ensure that
    /// it is sound to have concurrent references to both parts. You should
    /// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or
    /// [`Self::via_unaligned`], which have no runtime cost.
    ///
    /// Note that this check is overly conservative if `T` is [`Immutable`]; for
    /// some types, this check will reject some splits which
    /// [`Self::via_immutable`] will accept.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes, IntoBytes, network_endian::U16};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Debug)]
    /// #[repr(C, align(2))]
    /// struct Packet {
    ///     length: U16,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = [
    ///     4u16.to_be(),
    ///     1u16.to_be(),
    ///     2u16.to_be(),
    ///     3u16.to_be(),
    ///     4u16.to_be()
    /// ];
    ///
    /// let packet = Packet::ref_from_bytes(bytes.as_bytes()).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [0, 1, 0, 2, 0, 3, 0, 4]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length.into()).unwrap();
    ///
    /// // Use a dynamic check to prove that it's okay to return concurrent
    /// // references to `packet` and `rest`.
    /// let (packet, rest) = split.via_runtime_check().unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [0, 1, 0, 2]);
    /// assert_eq!(rest, [0, 3, 0, 4]);
    ///
    /// // Attempt to split `packet` at `length - 1`.
    /// let idx = packet.length.get() - 1;
    /// let split = packet.split_at(idx as usize).unwrap();
    ///
    /// // Attempt (and fail) to use a dynamic check to prove that it's okay
    /// // to return concurrent references to `packet` and `rest`. Note that
    /// // this is a case of `via_runtime_check` being overly conservative.
    /// // Although the left and right parts indeed overlap, the `Immutable`
    /// // bound ensures that concurrently referencing these overlapping
    /// // parts is sound.
    /// assert!(split.via_runtime_check().is_err());
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "split_via_runtime_check",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_runtime_check(self) -> Result<(&'a T, &'a [T::Elem]), Self> {
        match self.into_ptr().via_runtime_check() {
            Ok((l, r)) => Ok((l.as_ref(), r.as_ref())),
            // On rejection, convert the `Ptr`-level split back into a
            // reference-level split so the caller gets `self` back intact.
            Err(s) => Err(s.into_ref()),
        }
    }

    /// Unsafely produces the split parts of `self`.
    ///
    /// # Safety
    ///
    /// If `T` permits interior mutation, the trailing padding bytes of the left
    /// portion must not overlap the right portion. For some dynamically sized
    /// types, the padding that appears after the trailing slice field [is a
    /// dynamic function of the trailing slice
    /// length](KnownLayout#slice-dst-layout). Thus, for some types, this
    /// condition is dependent on the length of the left portion.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "split_via_unchecked",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub unsafe fn via_unchecked(self) -> (&'a T, &'a [T::Elem]) {
        // SAFETY: The aliasing of `self.into_ptr()` is not `Exclusive`, but the
        // caller has promised that if `T` permits interior mutation then the
        // left and right portions of `self` split at `l_len` do not overlap.
        let (l, r) = unsafe { self.into_ptr().via_unchecked() };
        (l.as_ref(), r.as_ref())
    }
}
597
impl<'a, T> Split<&'a mut T>
where
    T: ?Sized + SplitAt,
{
    /// Converts this split of an exclusive reference into a split of [`Ptr`],
    /// on which the aliasing-aware splitting logic is implemented.
    #[inline(always)]
    fn into_ptr(self) -> Split<Ptr<'a, T, (Exclusive, Aligned, Valid)>> {
        let source = Ptr::from_mut(self.source);
        // SAFETY: `Ptr::from_mut(self.source)` points to exactly `self.source`,
        // and thus maintains the invariants of `self` with respect to `l_len`.
        unsafe { Split::new(source, self.l_len) }
    }

    /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
    ///     // return concurrent references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_into_bytes();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    ///
    /// # Code Generation
    ///
    /// See [`Split::via_immutable`](#method.split_via_immutable.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_into_bytes(self) -> (&'a mut T, &'a mut [T::Elem])
    where
        T: IntoBytes,
    {
        let (l, r) = self.into_ptr().via_into_bytes();
        (l.as_mut(), r.as_mut())
    }

    /// Produces the split parts of `self`, using [`Unaligned`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Unaligned)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use the `Unaligned` bound on `Packet` to prove that it's okay to
    ///     // return concurrent references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_unaligned();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    ///
    /// # Code Generation
    ///
    /// See [`Split::via_immutable`](#method.split_via_immutable.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_unaligned(self) -> (&'a mut T, &'a mut [T::Elem])
    where
        T: Unaligned,
    {
        let (l, r) = self.into_ptr().via_unaligned();
        (l.as_mut(), r.as_mut())
    }

    /// Produces the split parts of `self`, using a dynamic check to ensure that
    /// it is sound to have concurrent references to both parts. You should
    /// prefer using [`Self::via_into_bytes`] or [`Self::via_unaligned`], which
    /// have no runtime cost.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Debug)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use a dynamic check to prove that it's okay to return concurrent
    ///     // references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_runtime_check().unwrap();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    ///
    /// # Code Generation
    ///
    /// See [`Split::via_runtime_check`](#method.split_via_runtime_check.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_runtime_check(self) -> Result<(&'a mut T, &'a mut [T::Elem]), Self> {
        match self.into_ptr().via_runtime_check() {
            Ok((l, r)) => Ok((l.as_mut(), r.as_mut())),
            // On rejection, convert the `Ptr`-level split back into a
            // reference-level split so the caller gets `self` back intact.
            Err(s) => Err(s.into_mut()),
        }
    }

    /// Unsafely produces the split parts of `self`.
    ///
    /// # Safety
    ///
    /// The trailing padding bytes of the left portion must not overlap the
    /// right portion. For some dynamically sized types, the padding that
    /// appears after the trailing slice field [is a dynamic function of the
    /// trailing slice length](KnownLayout#slice-dst-layout). Thus, for some
    /// types, this condition is dependent on the length of the left portion.
    ///
    /// # Code Generation
    ///
    /// See [`Split::via_unchecked`](#method.split_via_unchecked.codegen).
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub unsafe fn via_unchecked(self) -> (&'a mut T, &'a mut [T::Elem]) {
        // SAFETY: The aliasing of `self.into_ptr()` is `Exclusive`, and the
        // caller has promised that the left and right portions of `self` split
        // at `l_len` do not overlap.
        let (l, r) = unsafe { self.into_ptr().via_unchecked() };
        (l.as_mut(), r.as_mut())
    }
}
802
803impl<'a, T, I> Split<Ptr<'a, T, I>>
804where
805    T: ?Sized + SplitAt,
806    I: Invariants<Alignment = Aligned, Validity = Valid>,
807{
808    fn into_ref(self) -> Split<&'a T>
809    where
810        I: Invariants<Aliasing = Shared>,
811    {
812        // SAFETY: `self.source.as_ref()` points to exactly the same referent as
813        // `self.source` and thus maintains the invariants of `self` with
814        // respect to `l_len`.
815        unsafe { Split::new(self.source.as_ref(), self.l_len) }
816    }
817
818    fn into_mut(self) -> Split<&'a mut T>
819    where
820        I: Invariants<Aliasing = Exclusive>,
821    {
822        // SAFETY: `self.source.as_mut()` points to exactly the same referent as
823        // `self.source` and thus maintains the invariants of `self` with
824        // respect to `l_len`.
825        unsafe { Split::new(self.source.unify_invariants().as_mut(), self.l_len) }
826    }
827
    /// Produces the length of `self`'s left part.
    ///
    /// The returned metadata is known to be in-bounds for `self.source`.
    #[inline(always)]
    fn l_len(&self) -> MetadataOf<T> {
        // SAFETY: By invariant on `Split`, `self.l_len` is not greater than the
        // length of `self.source`.
        unsafe { MetadataOf::<T>::new_unchecked(self.l_len) }
    }
835
    /// Produces the split parts of `self`, using [`Immutable`] to ensure that
    /// it is sound to have concurrent references to both parts.
    #[inline(always)]
    fn via_immutable(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
    where
        T: Immutable,
        I: Invariants<Aliasing = Shared>,
    {
        // SAFETY: `Aliasing = Shared` and `T: Immutable`. Because `T` is free
        // of interior mutation and both parts are only shared, overlapping
        // referents are sound.
        unsafe { self.via_unchecked() }
    }
847
    /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
    /// it is sound to have concurrent references to both parts.
    #[inline(always)]
    fn via_into_bytes(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
    where
        T: IntoBytes,
    {
        // SAFETY: By `T: IntoBytes`, `T` has no padding for any length.
        // Consequently, `T` can be split into non-overlapping parts at any
        // index.
        unsafe { self.via_unchecked() }
    }
860
    /// Produces the split parts of `self`, using [`Unaligned`] to ensure that
    /// it is sound to have concurrent references to both parts.
    #[inline(always)]
    fn via_unaligned(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
    where
        T: Unaligned,
    {
        // SAFETY: By `T: SplitAt + Unaligned`, `T` is either a slice or a
        // `repr(C)` or `repr(transparent)` slice DST that is well-aligned at
        // any address and length. If `T` is a slice DST with alignment 1,
        // `repr(C)` or `repr(transparent)` ensures that no padding is placed
        // after the final element of the trailing slice. Consequently, `T` can
        // be split into strictly non-overlapping parts at any index.
        unsafe { self.via_unchecked() }
    }
876
877    /// Produces the split parts of `self`, using a dynamic check to ensure that
878    /// it is sound to have concurrent references to both parts. You should
879    /// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or
880    /// [`Self::via_unaligned`], which have no runtime cost.
881    #[inline(always)]
882    fn via_runtime_check(self) -> Result<(Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>), Self> {
883        let l_len = self.l_len();
884        // FIXME(#1290): Once we require `KnownLayout` on all fields, add an
885        // `IS_IMMUTABLE` associated const, and add `T::IS_IMMUTABLE ||` to the
886        // below check.
887        if l_len.padding_needed_for() == 0 {
888            // SAFETY: By `T: SplitAt`, `T` is either `[T]`, or a `repr(C)` or
889            // `repr(transparent)` slice DST, for which the trailing padding
890            // needed to accommodate `l_len` trailing elements is
891            // `l_len.padding_needed_for()`. If no trailing padding is required,
892            // the left and right parts are strictly non-overlapping.
893            Ok(unsafe { self.via_unchecked() })
894        } else {
895            Err(self)
896        }
897    }
898
    /// Unsafely produces the split parts of `self`.
    ///
    /// # Safety
    ///
    /// The caller promises that if `I::Aliasing` is [`Exclusive`] or `T`
    /// permits interior mutation, then `l_len.padding_needed_for() == 0`.
    #[inline(always)]
    unsafe fn via_unchecked(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) {
        let l_len = self.l_len();
        let inner = self.source.as_inner();

        // SAFETY: By invariant on `Self::l_len`, `l_len` is not greater than
        // the length of `inner`'s trailing slice.
        let (left, right) = unsafe { inner.split_at_unchecked(l_len) };

        // Lemma 0: `left` and `right` conform to the aliasing invariant
        // `I::Aliasing`. Proof: If `I::Aliasing` is `Exclusive` or `T` permits
        // interior mutation, the caller promises that
        // `l_len.padding_needed_for() == 0`. Consequently, by post-condition
        // on `PtrInner::split_at_unchecked`, there is no trailing padding
        // after `left`'s final element that would overlap into `right`. If
        // `I::Aliasing` is shared and `T` forbids interior mutation, then
        // overlap between their referents is permissible.

        // SAFETY:
        // 0. `left` conforms to the aliasing invariant of `I::Aliasing`, by
        //    Lemma 0.
        // 1. `left` conforms to the alignment invariant of `I::Alignment`,
        //    because the referents of `left` and `Self` have the same address
        //    and type (and, thus, alignment requirement).
        // 2. `left` conforms to the validity invariant of `I::Validity`,
        //    because neither the type nor the bytes of `left`'s referent have
        //    been changed.
        let left = unsafe { Ptr::from_inner(left) };

        // SAFETY:
        // 0. `right` conforms to the aliasing invariant of `I::Aliasing`, by
        //    Lemma 0.
        // 1. `right` conforms to the alignment invariant of `I::Alignment`,
        //    because if `ptr` with `I::Alignment = Aligned`, then by invariant
        //    on `T: SplitAt`, the trailing slice of `ptr` (from which `right`
        //    is derived) will also be well-aligned.
        // 2. `right` conforms to the validity invariant of `I::Validity`,
        //    because `right: [T::Elem]` is derived from the trailing slice of
        //    `ptr`, which, by contract on `T: SplitAt::Elem`, has type
        //    `[T::Elem]`. The `left` part cannot be used to invalidate `right`,
        //    because the caller promises that if `I::Aliasing` is `Exclusive`
        //    or `T` permits interior mutation, then `l_len.padding_needed_for()
        //    == 0` and thus the parts will be non-overlapping.
        let right = unsafe { Ptr::from_inner(right) };

        (left, right)
    }
949}
950
#[cfg(test)]
mod tests {
    #[cfg(feature = "derive")]
    #[test]
    fn test_split_at() {
        use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};

        #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Debug)]
        #[repr(C)]
        struct SliceDst<const OFFSET: usize> {
            prefix: [u8; OFFSET],
            trailing: [u8],
        }

        /// Exercises `split_at` and `split_at_mut` at every valid split index
        /// of a buffer of ones, checking that every trailing element lands in
        /// exactly one of the two parts.
        #[allow(clippy::as_conversions)]
        fn check<const OFFSET: usize, const BUFFER_SIZE: usize>() {
            let trailing_len: usize = BUFFER_SIZE - OFFSET;

            // Shared splits via `split_at`.
            let buf = [1; BUFFER_SIZE];
            let dst = SliceDst::<OFFSET>::ref_from_bytes(&buf[..]).unwrap();
            for idx in 0..=trailing_len {
                let (left, right) = dst.split_at(idx).unwrap().via_runtime_check().unwrap();
                let left_sum: u8 = left.trailing.iter().sum();
                let right_sum: u8 = right.iter().sum();
                assert_eq!(left_sum, idx as u8);
                assert_eq!(right_sum, (trailing_len - idx) as u8);
                assert_eq!(left_sum + right_sum, trailing_len as u8);
            }

            // Exclusive splits via `split_at_mut`.
            let mut buf = [1; BUFFER_SIZE];
            let dst = SliceDst::<OFFSET>::mut_from_bytes(&mut buf[..]).unwrap();
            for idx in 0..=trailing_len {
                let (left, right) = dst.split_at_mut(idx).unwrap().via_runtime_check().unwrap();
                let left_sum: u8 = left.trailing.iter().sum();
                let right_sum: u8 = right.iter().sum();
                assert_eq!(left_sum, idx as u8);
                assert_eq!(right_sum, (trailing_len - idx) as u8);
                assert_eq!(left_sum + right_sum, trailing_len as u8);
            }
        }

        check::<0, 16>();
        check::<1, 17>();
        check::<2, 18>();
    }

    #[cfg(feature = "derive")]
    #[test]
    #[allow(clippy::as_conversions)]
    fn test_split_at_overlapping() {
        use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};

        #[derive(FromBytes, KnownLayout, SplitAt, Immutable)]
        #[repr(C, align(2))]
        struct SliceDst {
            prefix: u8,
            trailing: [u8],
        }

        const N: usize = 16;

        let words = [1u16; N];
        let dst = SliceDst::ref_from_bytes(words.as_bytes()).unwrap();

        // With a one-byte prefix and two-byte alignment, splitting at an even
        // index leaves one byte of trailing padding in the left part that
        // would overlap the right part, so the runtime check must reject
        // exactly the even indices.
        for idx in 0..N {
            let split = dst.split_at(idx).unwrap().via_runtime_check();
            assert_eq!(split.is_ok(), idx % 2 == 1);
        }
    }

    #[test]
    fn test_split_at_unchecked() {
        use crate::SplitAt;

        let mut arr = [1, 2, 3, 4];

        // Shared split.
        let shared = &arr[..];
        // SAFETY: The split index, 2, does not exceed `arr.len()` (4).
        let split = unsafe { SplitAt::split_at_unchecked(shared, 2) };
        // SAFETY: `SplitAt::split_at_unchecked` guarantees that the split is
        // valid.
        let (left, right) = unsafe { split.via_unchecked() };
        assert_eq!(left, &[1, 2]);
        assert_eq!(right, &[3, 4]);

        // Exclusive split.
        let exclusive = &mut arr[..];
        // SAFETY: The split index, 2, does not exceed `arr.len()` (4).
        let split = unsafe { SplitAt::split_at_mut_unchecked(exclusive, 2) };
        // SAFETY: `SplitAt::split_at_mut_unchecked` guarantees that the split
        // is valid.
        let (left, right) = unsafe { split.via_unchecked() };
        assert_eq!(left, &mut [1, 2]);
        assert_eq!(right, &mut [3, 4]);
    }

    #[test]
    fn test_split_at_via_methods() {
        use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};

        #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Debug)]
        #[repr(C)]
        struct Packet {
            length: u8,
            body: [u8],
        }

        let bytes = [1, 2, 3, 4];
        let packet = Packet::ref_from_bytes(&bytes[..]).unwrap();

        // Zero-cost split justified by `Packet: Immutable`.
        let (left, right) = packet.split_at(2).unwrap().via_immutable();
        assert_eq!(left.length, 1);
        assert_eq!(right, &[4]);

        // Zero-cost split justified by `Packet: IntoBytes`.
        let (left, right) = packet.split_at(2).unwrap().via_into_bytes();
        assert_eq!(left.length, 1);
        assert_eq!(right, &[4]);
    }

    #[test]
    fn test_split_at_via_unaligned() {
        use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt, Unaligned};

        #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Unaligned)]
        #[repr(C)]
        struct Packet {
            length: u8,
            body: [u8],
        }

        let bytes = [1, 2, 3, 4];
        let packet = Packet::ref_from_bytes(&bytes[..]).unwrap();

        // Zero-cost split justified by `Packet: Unaligned`.
        let (left, right) = packet.split_at(2).unwrap().via_unaligned();
        assert_eq!(left.length, 1);
        assert_eq!(right, &[4]);
    }
}