// zerocopy/lib.rs
1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13// cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//! certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//! instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//! arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//! sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//! qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//! except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//! one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//! mutable reference of one type to a mutable reference of another type of
72//! the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//! mutable or immutable reference of one type to an immutable reference of
75//! another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//! the `alloc` crate is added as a dependency, and some allocation-related
95//! functionality is added.
96//!
97//! - **`std`**
98//! By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
//!   `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//! support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//! Provides derives for the core marker traits via the `zerocopy-derive`
104//! crate. These derives are re-exported from `zerocopy`, so it is not
105//! necessary to depend on `zerocopy-derive` directly.
106//!
107//! However, you may experience better compile times if you instead directly
108//! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//! since doing so will allow Rust to compile these crates in parallel. To do
110//! so, do *not* enable the `derive` feature, and list both dependencies in
//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
112//!
113//! ```toml
114//! [dependencies]
115//! zerocopy = "0.X"
116//! zerocopy-derive = "0.X"
117//! ```
118//!
119//! To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//! one of your dependencies enables zerocopy's `derive` feature, import
121//! derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//! zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//! When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//! `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//! target platform. Note that the layout of SIMD types is not yet stabilized,
128//! so these impls may be removed in the future if layout changes make them
129//! invalid. For more information, see the Unsafe Code Guidelines Reference
130//! page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//! Enables the `simd` feature and adds support for SIMD types which are only
134//! available on nightly. Since these types are unstable, support for any type
135//! may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//! Adds support for the unstable `f16` and `f128` types. These types are
139//! not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//! We annotate `unsafe` code with a precise rationale for its soundness that
151//! cites a relevant section of Rust's official documentation. When Rust's
152//! documented semantics are unclear, we work with the Rust Operational
153//! Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//! We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//! array of supported target platforms of varying endianness and pointer
157//! width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//! We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//! correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google with help from [many wonderful
206//! contributors][contributors]. Thank you to everyone who has lent a hand in
207//! making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218 anonymous_parameters,
219 deprecated_in_future,
220 late_bound_lifetime_arguments,
221 missing_copy_implementations,
222 missing_debug_implementations,
223 missing_docs,
224 path_statements,
225 patterns_in_fns_without_body,
226 rust_2018_idioms,
227 trivial_numeric_casts,
228 unreachable_pub,
229 unsafe_op_in_unsafe_fn,
230 unused_extern_crates,
231 // We intentionally choose not to deny `unused_qualifications`. When items
232 // are added to the prelude (e.g., `core::mem::size_of`), this has the
233 // consequence of making some uses trigger this lint on the latest toolchain
234 // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235 // does not work on older toolchains.
236 //
237 // We tested a more complicated fix in #1413, but ultimately decided that,
238 // since this lint is just a minor style lint, the complexity isn't worth it
239 // - it's fine to occasionally have unused qualifications slip through,
240 // especially since these do not affect our user-facing API in any way.
241 variant_size_differences
242)]
243#![cfg_attr(
244 __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245 deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248 clippy::all,
249 clippy::alloc_instead_of_core,
250 clippy::arithmetic_side_effects,
251 clippy::as_underscore,
252 clippy::assertions_on_result_states,
253 clippy::as_conversions,
254 clippy::correctness,
255 clippy::dbg_macro,
256 clippy::decimal_literal_representation,
257 clippy::double_must_use,
258 clippy::get_unwrap,
259 clippy::indexing_slicing,
260 clippy::missing_inline_in_public_items,
261 clippy::missing_safety_doc,
262 clippy::multiple_unsafe_ops_per_block,
263 clippy::must_use_candidate,
264 clippy::must_use_unit,
265 clippy::obfuscated_if_else,
266 clippy::perf,
267 clippy::print_stdout,
268 clippy::return_self_not_must_use,
269 clippy::std_instead_of_core,
270 clippy::style,
271 clippy::suspicious,
272 clippy::todo,
273 clippy::undocumented_unsafe_blocks,
274 clippy::unimplemented,
275 clippy::unnested_or_patterns,
276 clippy::unwrap_used,
277 clippy::use_debug
278)]
279// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
280// has false positives, and we test on our MSRV in CI, so it doesn't help us
281// anyway.
282#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
283#![deny(
284 rustdoc::bare_urls,
285 rustdoc::broken_intra_doc_links,
286 rustdoc::invalid_codeblock_attributes,
287 rustdoc::invalid_html_tags,
288 rustdoc::invalid_rust_codeblocks,
289 rustdoc::missing_crate_level_docs,
290 rustdoc::private_intra_doc_links
291)]
292// In test code, it makes sense to weight more heavily towards concise, readable
293// code over correct or debuggable code.
294#![cfg_attr(any(test, kani), allow(
295 // In tests, you get line numbers and have access to source code, so panic
296 // messages are less important. You also often unwrap a lot, which would
297 // make expect'ing instead very verbose.
298 clippy::unwrap_used,
299 // In tests, there's no harm to "panic risks" - the worst that can happen is
300 // that your test will fail, and you'll fix it. By contrast, panic risks in
301 // production code introduce the possibly of code panicking unexpectedly "in
302 // the field".
303 clippy::arithmetic_side_effects,
304 clippy::indexing_slicing,
305))]
306#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
307#![cfg_attr(
308 all(feature = "simd-nightly", target_arch = "arm"),
309 feature(stdarch_arm_neon_intrinsics)
310)]
311#![cfg_attr(
312 all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
313 feature(stdarch_powerpc)
314)]
315#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
316#![cfg_attr(doc_cfg, feature(doc_cfg))]
317#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
318#![cfg_attr(
319 any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
320 feature(layout_for_ptr)
321)]
322#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
323
324// This is a hack to allow zerocopy-derive derives to work in this crate. They
325// assume that zerocopy is linked as an extern crate, so they access items from
326// it as `zerocopy::Xxx`. This makes that still work.
327#[cfg(any(feature = "derive", test))]
328extern crate self as zerocopy;
329
330#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
331extern crate test;
332
333#[doc(hidden)]
334#[macro_use]
335pub mod util;
336
337pub mod byte_slice;
338pub mod byteorder;
339mod deprecated;
340
341#[doc(hidden)]
342pub mod doctests;
343
344// This module is `pub` so that zerocopy's error types and error handling
345// documentation is grouped together in a cohesive module. In practice, we
346// expect most users to use the re-export of `error`'s items to avoid identifier
347// stuttering.
348pub mod error;
349mod impls;
350#[doc(hidden)]
351pub mod layout;
352mod macros;
353#[doc(hidden)]
354pub mod pointer;
355mod r#ref;
356mod split_at;
357// FIXME(#252): If we make this pub, come up with a better name.
358mod wrappers;
359
360use core::{
361 cell::{Cell, UnsafeCell},
362 cmp::Ordering,
363 fmt::{self, Debug, Display, Formatter},
364 hash::Hasher,
365 marker::PhantomData,
366 mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
367 num::{
368 NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
369 NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
370 },
371 ops::{Deref, DerefMut},
372 ptr::{self, NonNull},
373 slice,
374};
375#[cfg(feature = "std")]
376use std::io;
377
378#[doc(hidden)]
379pub use crate::pointer::invariant::{self, BecauseExclusive};
380#[doc(hidden)]
381pub use crate::pointer::PtrInner;
382pub use crate::{
383 byte_slice::*,
384 byteorder::*,
385 error::*,
386 r#ref::*,
387 split_at::{Split, SplitAt},
388 wrappers::*,
389};
390
391#[cfg(any(feature = "alloc", test, kani))]
392extern crate alloc;
393#[cfg(any(feature = "alloc", test))]
394use alloc::{boxed::Box, vec::Vec};
395#[cfg(any(feature = "alloc", test))]
396use core::alloc::Layout;
397
398use util::MetadataOf;
399
400// Used by `KnownLayout`.
401#[doc(hidden)]
402pub use crate::layout::*;
403// Used by `TryFromBytes::is_bit_valid`.
404#[doc(hidden)]
405pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
406// For each trait polyfill, as soon as the corresponding feature is stable, the
407// polyfill import will be unused because method/function resolution will prefer
408// the inherent method/function over a trait method/function. Thus, we suppress
409// the `unused_imports` warning.
410//
411// See the documentation on `util::polyfills` for more information.
412#[allow(unused_imports)]
413use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
414
#[rustversion::nightly]
#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
const _: () = {
    // Hack: there is no stable mechanism for emitting a custom compile-time
    // warning, so we declare a `#[deprecated]` item and then reference it
    // below. Evaluating `_WARNING` (with the `deprecated` lint explicitly
    // enabled) causes rustc to print the deprecation message at compile time,
    // reminding developers on nightly toolchains to set the cfg flag so that
    // nightly-only tests are not silently skipped.
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    #[warn(deprecated)]
    _WARNING
};
423
424// These exist so that code which was written against the old names will get
425// less confusing error messages when they upgrade to a more recent version of
426// zerocopy. On our MSRV toolchain, the error messages read, for example:
427//
428// error[E0603]: trait `FromZeroes` is private
429// --> examples/deprecated.rs:1:15
430// |
431// 1 | use zerocopy::FromZeroes;
432// | ^^^^^^^^^^ private trait
433// |
434// note: the trait `FromZeroes` is defined here
435// --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
436// |
437// 1845 | use FromZeros as FromZeroes;
438// | ^^^^^^^^^^^^^^^^^^^^^^^
439//
440// The "note" provides enough context to make it easy to figure out how to fix
441// the error.
442/// Implements [`KnownLayout`].
443///
444/// This derive analyzes various aspects of a type's layout that are needed for
445/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
446/// e.g.:
447///
448/// ```
449/// # use zerocopy_derive::KnownLayout;
450/// #[derive(KnownLayout)]
451/// struct MyStruct {
452/// # /*
453/// ...
454/// # */
455/// }
456///
457/// #[derive(KnownLayout)]
458/// enum MyEnum {
459/// # V00,
460/// # /*
461/// ...
462/// # */
463/// }
464///
465/// #[derive(KnownLayout)]
466/// union MyUnion {
467/// # variant: u8,
468/// # /*
469/// ...
470/// # */
471/// }
472/// ```
473///
474/// # Limitations
475///
476/// This derive cannot currently be applied to unsized structs without an
477/// explicit `repr` attribute.
478///
479/// Some invocations of this derive run afoul of a [known bug] in Rust's type
480/// privacy checker. For example, this code:
481///
482/// ```compile_fail,E0446
483/// use zerocopy::*;
484/// # use zerocopy_derive::*;
485///
486/// #[derive(KnownLayout)]
487/// #[repr(C)]
488/// pub struct PublicType {
489/// leading: Foo,
490/// trailing: Bar,
491/// }
492///
493/// #[derive(KnownLayout)]
494/// struct Foo;
495///
496/// #[derive(KnownLayout)]
497/// struct Bar;
498/// ```
499///
500/// ...results in a compilation error:
501///
502/// ```text
503/// error[E0446]: private type `Bar` in public interface
504/// --> examples/bug.rs:3:10
505/// |
506/// 3 | #[derive(KnownLayout)]
507/// | ^^^^^^^^^^^ can't leak private type
508/// ...
509/// 14 | struct Bar;
510/// | ---------- `Bar` declared as private
511/// |
512/// = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
513/// ```
514///
515/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
516/// structs whose trailing field type is less public than the enclosing struct.
517///
518/// To work around this, mark the trailing field type `pub` and annotate it with
519/// `#[doc(hidden)]`; e.g.:
520///
521/// ```no_run
522/// use zerocopy::*;
523/// # use zerocopy_derive::*;
524///
525/// #[derive(KnownLayout)]
526/// #[repr(C)]
527/// pub struct PublicType {
528/// leading: Foo,
529/// trailing: Bar,
530/// }
531///
532/// #[derive(KnownLayout)]
533/// struct Foo;
534///
535/// #[doc(hidden)]
536/// #[derive(KnownLayout)]
537/// pub struct Bar; // <- `Bar` is now also `pub`
538/// ```
539///
540/// [known bug]: https://github.com/rust-lang/rust/issues/45713
541#[cfg(any(feature = "derive", test))]
542#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
543pub use zerocopy_derive::KnownLayout;
544#[allow(unused)]
545use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
546
547/// Indicates that zerocopy can reason about certain aspects of a type's layout.
548///
549/// This trait is required by many of zerocopy's APIs. It supports sized types,
550/// slices, and [slice DSTs](#dynamically-sized-types).
551///
552/// # Implementation
553///
554/// **Do not implement this trait yourself!** Instead, use
555/// [`#[derive(KnownLayout)]`][derive]; e.g.:
556///
557/// ```
558/// # use zerocopy_derive::KnownLayout;
559/// #[derive(KnownLayout)]
560/// struct MyStruct {
561/// # /*
562/// ...
563/// # */
564/// }
565///
566/// #[derive(KnownLayout)]
567/// enum MyEnum {
568/// # /*
569/// ...
570/// # */
571/// }
572///
573/// #[derive(KnownLayout)]
574/// union MyUnion {
575/// # variant: u8,
576/// # /*
577/// ...
578/// # */
579/// }
580/// ```
581///
582/// This derive performs a sophisticated analysis to deduce the layout
583/// characteristics of types. You **must** implement this trait via the derive.
584///
585/// # Dynamically-sized types
586///
587/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
588///
589/// A slice DST is a type whose trailing field is either a slice or another
590/// slice DST, rather than a type with fixed size. For example:
591///
592/// ```
593/// #[repr(C)]
594/// struct PacketHeader {
595/// # /*
596/// ...
597/// # */
598/// }
599///
600/// #[repr(C)]
601/// struct Packet {
602/// header: PacketHeader,
603/// body: [u8],
604/// }
605/// ```
606///
607/// It can be useful to think of slice DSTs as a generalization of slices - in
608/// other words, a normal slice is just the special case of a slice DST with
609/// zero leading fields. In particular:
610/// - Like slices, slice DSTs can have different lengths at runtime
611/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
612/// or via other indirection such as `Box`
613/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
614/// encodes the number of elements in the trailing slice field
615///
616/// ## Slice DST layout
617///
618/// Just like other composite Rust types, the layout of a slice DST is not
619/// well-defined unless it is specified using an explicit `#[repr(...)]`
620/// attribute such as `#[repr(C)]`. [Other representations are
621/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
622/// example.
623///
624/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
625/// types][repr-c-structs], but the presence of a variable-length field
626/// introduces the possibility of *dynamic padding*. In particular, it may be
627/// necessary to add trailing padding *after* the trailing slice field in order
628/// to satisfy the outer type's alignment, and the amount of padding required
629/// may be a function of the length of the trailing slice field. This is just a
630/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
631/// but it can result in surprising behavior. For example, consider the
632/// following type:
633///
634/// ```
635/// #[repr(C)]
636/// struct Foo {
637/// a: u32,
638/// b: u8,
639/// z: [u16],
640/// }
641/// ```
642///
643/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
644/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
645/// `Foo`:
646///
647/// ```text
648/// byte offset | 01234567
649/// field | aaaab---
650/// ><
651/// ```
652///
653/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
654/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
655/// round up to offset 6. This means that there is one byte of padding between
656/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
657/// then two bytes of padding after `z` in order to satisfy the overall
658/// alignment of `Foo`. The size of this instance is 8 bytes.
659///
660/// What about if `z` has length 1?
661///
662/// ```text
663/// byte offset | 01234567
664/// field | aaaab-zz
665/// ```
666///
667/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
668/// that we no longer need padding after `z` in order to satisfy `Foo`'s
669/// alignment. We've now seen two different values of `Foo` with two different
670/// lengths of `z`, but they both have the same size - 8 bytes.
671///
672/// What about if `z` has length 2?
673///
674/// ```text
675/// byte offset | 012345678901
676/// field | aaaab-zzzz--
677/// ```
678///
679/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
680/// size to 10, and so we now need another 2 bytes of padding after `z` to
681/// satisfy `Foo`'s alignment.
682///
683/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
684/// applied to slice DSTs, but it can be surprising that the amount of trailing
685/// padding becomes a function of the trailing slice field's length, and thus
686/// can only be computed at runtime.
687///
688/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
689/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
690///
691/// ## What is a valid size?
692///
693/// There are two places in zerocopy's API that we refer to "a valid size" of a
694/// type. In normal casts or conversions, where the source is a byte slice, we
695/// need to know whether the source byte slice is a valid size of the
696/// destination type. In prefix or suffix casts, we need to know whether *there
697/// exists* a valid size of the destination type which fits in the source byte
698/// slice and, if so, what the largest such size is.
699///
700/// As outlined above, a slice DST's size is defined by the number of elements
701/// in its trailing slice field. However, there is not necessarily a 1-to-1
702/// mapping between trailing slice field length and overall size. As we saw in
703/// the previous section with the type `Foo`, instances with both 0 and 1
704/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
705///
706/// When we say "x is a valid size of `T`", we mean one of two things:
707/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
708/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
709/// `T` with `len` trailing slice elements has size `x`
710///
711/// When we say "largest possible size of `T` that fits in a byte slice", we
712/// mean one of two things:
713/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
714/// `size_of::<T>()` bytes long
715/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
716/// that the instance of `T` with `len` trailing slice elements fits in the
717/// byte slice, and to choose the largest such `len`, if any
718///
719///
720/// # Safety
721///
722/// This trait does not convey any safety guarantees to code outside this crate.
723///
724/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
725/// releases of zerocopy may make backwards-breaking changes to these items,
726/// including changes that only affect soundness, which may cause code which
727/// uses those items to silently become unsound.
728///
729#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
730#[cfg_attr(
731 not(feature = "derive"),
732 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
733)]
734#[cfg_attr(
735 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
736 diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
737)]
738pub unsafe trait KnownLayout {
739 // The `Self: Sized` bound makes it so that `KnownLayout` can still be
740 // object safe. It's not currently object safe thanks to `const LAYOUT`, and
741 // it likely won't be in the future, but there's no reason not to be
742 // forwards-compatible with object safety.
743 #[doc(hidden)]
744 fn only_derive_is_allowed_to_implement_this_trait()
745 where
746 Self: Sized;
747
748 /// The type of metadata stored in a pointer to `Self`.
749 ///
750 /// This is `()` for sized types and `usize` for slice DSTs.
751 type PointerMetadata: PointerMetadata;
752
753 /// A maybe-uninitialized analog of `Self`
754 ///
755 /// # Safety
756 ///
757 /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
758 /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
759 #[doc(hidden)]
760 type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
761
762 /// The layout of `Self`.
763 ///
764 /// # Safety
765 ///
766 /// Callers may assume that `LAYOUT` accurately reflects the layout of
767 /// `Self`. In particular:
768 /// - `LAYOUT.align` is equal to `Self`'s alignment
769 /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
770 /// where `size == size_of::<Self>()`
771 /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
772 /// SizeInfo::SliceDst(slice_layout)` where:
773 /// - The size, `size`, of an instance of `Self` with `elems` trailing
774 /// slice elements is equal to `slice_layout.offset +
775 /// slice_layout.elem_size * elems` rounded up to the nearest multiple
776 /// of `LAYOUT.align`
777 /// - For such an instance, any bytes in the range `[slice_layout.offset +
778 /// slice_layout.elem_size * elems, size)` are padding and must not be
779 /// assumed to be initialized
780 #[doc(hidden)]
781 const LAYOUT: DstLayout;
782
783 /// SAFETY: The returned pointer has the same address and provenance as
784 /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
785 /// elements in its trailing slice.
786 #[doc(hidden)]
787 fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
788
789 /// Extracts the metadata from a pointer to `Self`.
790 ///
791 /// # Safety
792 ///
793 /// `pointer_to_metadata` always returns the correct metadata stored in
794 /// `ptr`.
795 #[doc(hidden)]
796 fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
797
    /// Computes the length of the byte range addressed by `ptr`.
    ///
    /// Returns `None` if the resulting length would not fit in a `usize`.
    ///
    /// # Safety
    ///
    /// Callers may assume that `size_of_val_raw` always returns the correct
    /// size.
    ///
    /// Callers may assume that, if `ptr` addresses a byte range whose length
    /// fits in a `usize`, this will return `Some`.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
        let meta = Self::pointer_to_metadata(ptr.as_ptr());
        // SAFETY: `size_for_metadata` promises to only return `None` if the
        // resulting size would not fit in a `usize`.
        Self::size_for_metadata(meta)
    }
818
    /// Constructs a dangling `NonNull<Self>` whose metadata describes zero
    /// trailing slice elements (for slice DSTs) or is `()` (for sized types).
    ///
    /// The base address is `NonNull::<u8>::dangling()`, so the returned
    /// pointer is not necessarily aligned for `Self` and must not be
    /// dereferenced.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn raw_dangling() -> NonNull<Self> {
        let meta = Self::PointerMetadata::from_elem_count(0);
        Self::raw_from_ptr_len(NonNull::dangling(), meta)
    }
826
    /// Computes the size of an object of type `Self` with the given pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to return `None` if and only if the
    /// resulting size would not fit in a `usize`. Note that the returned size
    /// could exceed the actual maximum valid size of an allocated object,
    /// `isize::MAX`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::KnownLayout;
    ///
    /// assert_eq!(u8::size_for_metadata(()), Some(1));
    /// assert_eq!(u16::size_for_metadata(()), Some(2));
    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
    ///
    /// // This size exceeds the maximum valid object size (`isize::MAX`):
    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
    ///
    /// // This size, if computed, would exceed `usize::MAX`:
    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
    /// ```
    #[inline(always)]
    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
        // Delegates to `PointerMetadata::size_for_metadata`, which promises to
        // only return `None` if the resulting size would not fit in a `usize`.
        meta.size_for_metadata(Self::LAYOUT)
    }
857}
858
859/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
860#[inline(always)]
861pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
862where
863 T: ?Sized + KnownLayout<PointerMetadata = usize>,
864{
865 trait LayoutFacts {
866 const SIZE_INFO: TrailingSliceLayout;
867 }
868
869 impl<T: ?Sized> LayoutFacts for T
870 where
871 T: KnownLayout<PointerMetadata = usize>,
872 {
873 const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
874 crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
875 crate::SizeInfo::SliceDst(info) => info,
876 };
877 }
878
879 T::SIZE_INFO
880}
881
/// The metadata associated with a [`KnownLayout`] type.
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
    /// `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// See also [`KnownLayout::size_for_metadata`], which delegates to this
    /// method.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting
    /// size would not fit in a `usize`.
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
}
906
907impl PointerMetadata for () {
908 #[inline]
909 #[allow(clippy::unused_unit)]
910 fn from_elem_count(_elems: usize) -> () {}
911
912 #[inline]
913 fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
914 match layout.size_info {
915 SizeInfo::Sized { size } => Some(size),
916 // NOTE: This branch is unreachable, but we return `None` rather
917 // than `unreachable!()` to avoid generating panic paths.
918 SizeInfo::SliceDst(_) => None,
919 }
920 }
921}
922
impl PointerMetadata for usize {
    #[inline]
    fn from_elem_count(elems: usize) -> usize {
        elems
    }

    #[inline]
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
        match layout.size_info {
            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
                // `self` is the number of trailing slice elements. All
                // arithmetic is checked so that, per this method's safety
                // promise, `None` is returned (rather than wrapping) if the
                // total size would not fit in a `usize`.
                let slice_len = elem_size.checked_mul(self)?;
                let without_padding = offset.checked_add(slice_len)?;
                // Round up to the layout's alignment to account for trailing
                // padding.
                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
            }
            // NOTE: This branch is unreachable, but we return `None` rather
            // than `unreachable!()` to avoid generating panic paths.
            SizeInfo::Sized { .. } => None,
        }
    }
}
943
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items, dead_code)]
    #[cfg_attr(
        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
        coverage(off)
    )]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    type PointerMetadata = usize;

    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
    // identical, because they both lack a fixed-sized prefix and because they
    // inherit the alignments of their inner element type (which are identical)
    // [2][3].
    //
    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
    // back-to-back [2][3].
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
    //   `T`
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
    //
    //   Slices have the same layout as the section of the array they slice.
    //
    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
    //
    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
    //   element of the array is offset from the start of the array by `n *
    //   size_of::<T>()` bytes.
    type MaybeUninit = [CoreMaybeUninit<T>];

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // FIXME(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
        #[allow(clippy::as_conversions)]
        let slc = ptr as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - By invariant, `ptr` has valid provenance.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
1026
// `KnownLayout` impls for primitives and `NonZero*` types, via the
// `impl_known_layout!` macro (defined elsewhere in this crate).
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
// `f16` and `f128` are unstable, so their impls are gated on the
// `float-nightly` feature.
#[rustfmt::skip]
#[cfg(feature = "float-nightly")]
impl_known_layout!(
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f16,
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f128
);
// Generic impls for wrapper, pointer, and reference types.
#[rustfmt::skip]
impl_known_layout!(
    T => Option<T>,
    T: ?Sized => PhantomData<T>,
    T => Wrapping<T>,
    T => CoreMaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
    T: ?Sized => &'_ T,
    T: ?Sized => &'_ mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);
1055
// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
//
// These facts justify the `#[repr(...)]` arguments passed to
// `unsafe_impl_known_layout!` below.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
//
// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
//
// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
//
//   `Cell<T>` has the same in-memory representation as `T`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
    unsafe_impl_known_layout!(
        #[repr([u8])]
        str
    );
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
};
1082
// SAFETY:
// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
//   - Fixed prefix size
//   - Alignment
//   - (For DSTs) trailing slice element size
// - By consequence of the above, referents of `T::MaybeUninit` and `T` require
//   the same kind of pointer metadata, and thus it is valid to perform an `as`
//   cast from `*mut T` to `*mut T::MaybeUninit`, and this operation preserves
//   referent size (ie, `size_of_val_raw`).
const _: () = unsafe {
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
};
1096
// FIXME(#196, #2856): Eventually, we'll want to support enum variants and
// union fields being treated uniformly since they behave similarly to each
// other in terms of projecting validity – specifically, for a type `T` with
// validity `V`, if `T` is a struct type, then its fields straightforwardly also
// have validity `V`. By contrast, if `T` is an enum or union type, then
// validity is not straightforwardly recursive in this way.

/// The `VARIANT_ID` used by [`HasField`] impls when `Self` has the layout of a
/// struct type.
#[doc(hidden)]
pub const STRUCT_VARIANT_ID: i128 = -1;
/// The `VARIANT_ID` used by [`HasField`] impls when `Self` has the layout of a
/// union type.
#[doc(hidden)]
pub const UNION_VARIANT_ID: i128 = -2;
/// The `VARIANT_ID` for `#[repr(C)]` unions.
// NOTE(review): not referenced in this portion of the file; presumably emitted
// by the derives to distinguish `#[repr(C)]` unions from other unions —
// confirm against zerocopy-derive.
#[doc(hidden)]
pub const REPR_C_UNION_VARIANT_ID: i128 = -3;
1109
/// Types with a projectable enum tag.
///
/// For non-enum types, [`Self::Tag`] is `()`.
///
/// # Safety
///
/// `Self::ProjectToTag` must satisfy its safety invariant.
#[doc(hidden)]
pub unsafe trait HasTag {
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type's enum tag, or `()` for non-enum types.
    type Tag: Immutable;

    /// A pointer projection from `Self` to its tag.
    ///
    /// # Safety
    ///
    /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound
    /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection.
    type ProjectToTag: pointer::cast::Project<Self, Self::Tag>;
}
1130
/// Projects a given field from `Self`.
///
/// All implementations of `HasField` for a particular field `f` in `Self`
/// should use the same `Field` type; this ensures that `Field` is inferable
/// given an explicit `VARIANT_ID` and `FIELD_ID`.
///
/// # Safety
///
/// A field `f` is `HasField` for `Self` if and only if:
///
/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is
///   `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
///   `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index
///   of the enum variant in which `f` appears. Note that `Self` does not need
///   to actually *be* such a type – it just needs to have the same layout as
///   such a type. For example, a `#[repr(transparent)]` wrapper around an enum
///   has the same layout as that enum.
/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
///   if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
/// - `Field` is a type with the same visibility as `f`.
/// - `Type` is the type of `f`.
///
/// The caller must **not** assume that a pointer's referent being aligned
/// implies that calling `project` on that pointer will result in a pointer to
/// an aligned referent. For example, `HasField` may be implemented for
/// `#[repr(packed)]` structs.
///
/// The implementation of `project` must satisfy its safety post-condition.
#[doc(hidden)]
pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasTag
{
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of the field.
    type Type: ?Sized;

    /// Projects from `slf` to the field.
    ///
    /// Users should generally not call `project` directly, and instead should
    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
    ///
    /// # Safety
    ///
    /// The returned pointer refers to a non-strict subset of the bytes of
    /// `slf`'s referent, and has the same provenance as `slf`.
    #[must_use]
    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
}
1182
/// Projects a given field from `Self`.
///
/// Implementations of this trait encode the conditions under which a field can
/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that
/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In
/// other words, it is a type-level function over invariants; `I` goes in,
/// `Self::Invariants` comes out.
///
/// # Safety
///
/// `T: ProjectField<Field, I, VARIANT_ID, FIELD_ID>` if, for a
/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`,
/// `<T as HasField<Field, VARIANT_ID, FIELD_ID>>::project(ptr.as_inner())`
/// conforms to `T::Invariants`.
#[doc(hidden)]
pub unsafe trait ProjectField<Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasField<Field, VARIANT_ID, FIELD_ID>
where
    I: invariant::Invariants,
{
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The invariants of the projected field pointer, with respect to the
    /// invariants, `I`, of the containing pointer. The aliasing dimension of
    /// the invariants is guaranteed to remain unchanged.
    type Invariants: invariant::Invariants<Aliasing = I::Aliasing>;

    /// The failure mode of projection. `()` if the projection is fallible,
    /// otherwise [`core::convert::Infallible`].
    type Error;

    /// Is the given field projectable from `ptr`?
    ///
    /// If a field with [`Self::Invariants`] is projectable from the referent,
    /// this function returns `Ok(())`, and the projection can be made from
    /// `ptr`; otherwise `Err`.
    ///
    /// This method must be overridden if the field's projectability depends on
    /// the value of the bytes in `ptr`.
    #[inline(always)]
    fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> {
        trait IsInfallible {
            const IS_INFALLIBLE: bool;
        }

        struct Projection<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>(
            PhantomData<(Field, I, T)>,
        )
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants;

        impl<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128> IsInfallible
            for Projection<T, Field, I, VARIANT_ID, FIELD_ID>
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants,
        {
            const IS_INFALLIBLE: bool = {
                let is_infallible = match VARIANT_ID {
                    // For nondestructive projections of struct and union
                    // fields, the projected field's satisfaction of
                    // `Invariants` does not depend on the value of the
                    // referent. This default implementation of `is_projectable`
                    // is non-destructive, as it does not overwrite any part of
                    // the referent.
                    crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true,
                    _enum_variant => {
                        use crate::invariant::{Validity, ValidityKind};
                        match I::Validity::KIND {
                            // The `Uninit` and `Initialized` validity
                            // invariants do not depend on the enum's tag. In
                            // particular, we don't actually care about what
                            // variant is present – we can treat *any* range of
                            // uninitialized or initialized memory as containing
                            // an uninitialized or initialized instance of *any*
                            // type – the type itself is irrelevant.
                            ValidityKind::Uninit | ValidityKind::Initialized => true,
                            // The projectability of an enum field from an
                            // `AsInitialized` or `Valid` state is a dynamic
                            // property of its tag.
                            ValidityKind::AsInitialized | ValidityKind::Valid => false,
                        }
                    }
                };
                // Fail compilation if this default body is instantiated for a
                // fallible projection; such projections must override
                // `is_projectable`.
                const_assert!(is_infallible);
                is_infallible
            };
        }

        const_assert!(
            <Projection<Self, Field, I, VARIANT_ID, FIELD_ID> as IsInfallible>::IS_INFALLIBLE
        );

        Ok(())
    }
}
1282
/// Analyzes whether a type is [`FromZeros`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
/// supertraits if it is sound to do so. This derive can be applied to structs,
/// enums, and unions; e.g.:
///
/// ```
/// # use zerocopy_derive::{FromZeros, Immutable};
/// #[derive(FromZeros)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromZeros#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromZeros` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromZeros` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromZeros` for that type:
///
/// - If the type is a struct, all of its fields must be `FromZeros`.
/// - If the type is an enum:
///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
///   - It must have a variant with a discriminant/tag of `0`. See [the
///     reference] for a description of how discriminant values are specified.
///   - The fields of that variant must be `FromZeros`.
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
/// implementation details of this derive.
///
/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromZeros` if:
/// 1. its padding is soundly `FromZeros`, and
/// 2. its fields are soundly `FromZeros`.
///
/// The answer to the first question is always yes: padding bytes do not have
/// any validity constraints. A [discussion] of this question in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
/// its fields are `FromZeros`.
// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
// attribute.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeros;
/// Analyzes whether a type is [`Immutable`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Immutable;
/// #[derive(Immutable)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// See [`Immutable`][trait@Immutable] for a description of what this trait
/// communicates about a type.
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `Immutable` for a given type.
/// Unless you are modifying the implementation of this derive, you don't need
/// to read this section.*
///
/// If a type has the following properties, then this derive can implement
/// `Immutable` for that type:
///
/// - All fields must be `Immutable`.
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `Immutable`, and must *not* rely on the
/// implementation details of this derive.
///
/// [safety conditions]: trait@Immutable#safety
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Immutable;
1429
/// Types which are free from interior mutability.
///
/// `T: Immutable` indicates that `T` does not permit interior mutation, except
/// by ownership or an exclusive (`&mut`) borrow.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Immutable;
/// #[derive(Immutable)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// enum MyEnum {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Immutable`; see the [derive's
/// documentation][derive-analysis] for an overview of the analysis performed.
///
/// # Safety
///
/// Unsafe code outside of this crate must not make any assumptions about `T`
/// based on `T: Immutable`. We reserve the right to relax the requirements for
/// `Immutable` in the future, and if unsafe code outside of this crate makes
/// assumptions based on `T: Immutable`, future relaxations may cause that code
/// to become unsound.
///
// # Safety (Internal)
//
// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
// `t: &T`, `t` does not permit interior mutation of its referent. Because
// [`UnsafeCell`] is the only type which permits interior mutation, it is
// sufficient (though not necessary) to guarantee that `T` contains no
// `UnsafeCell`s.
//
// [`UnsafeCell`]: core::cell::UnsafeCell
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Immutable",
    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
)]
pub unsafe trait Immutable {
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1508
/// Implements [`TryFromBytes`].
///
/// This derive synthesizes the runtime checks required to check whether a
/// sequence of initialized bytes corresponds to a valid instance of a type.
/// See `TryFromBytes`'s [safety conditions] for what implementations must
/// guarantee. This derive can be applied to structs, enums, and unions; e.g.:
///
/// ```
/// # use zerocopy_derive::{TryFromBytes, Immutable};
/// #[derive(TryFromBytes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(TryFromBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   V00,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(TryFromBytes, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// # Portability
///
/// To ensure consistent endianness for enums with multi-byte representations,
/// explicitly specify and convert each discriminant using `.to_le()` or
/// `.to_be()`; e.g.:
///
/// ```
/// # use zerocopy_derive::TryFromBytes;
/// // `DataStoreVersion` is encoded in little-endian.
/// #[derive(TryFromBytes)]
/// #[repr(u32)]
/// pub enum DataStoreVersion {
///     /// Version 1 of the data store.
///     V1 = 9u32.to_le(),
///
///     /// Version 2 of the data store.
///     V2 = 10u32.to_le(),
/// }
/// ```
///
/// [safety conditions]: trait@TryFromBytes#safety
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::TryFromBytes;
1566
1567/// Types for which some bit patterns are valid.
1568///
1569/// A memory region of the appropriate length which contains initialized bytes
1570/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1571/// bytes corresponds to a [*valid instance*] of that type. For example,
1572/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1573/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1574/// `1`.
1575///
1576/// # Implementation
1577///
1578/// **Do not implement this trait yourself!** Instead, use
1579/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1580///
1581/// ```
1582/// # use zerocopy_derive::{TryFromBytes, Immutable};
1583/// #[derive(TryFromBytes)]
1584/// struct MyStruct {
1585/// # /*
1586/// ...
1587/// # */
1588/// }
1589///
1590/// #[derive(TryFromBytes)]
1591/// #[repr(u8)]
1592/// enum MyEnum {
1593/// # V00,
1594/// # /*
1595/// ...
1596/// # */
1597/// }
1598///
1599/// #[derive(TryFromBytes, Immutable)]
1600/// union MyUnion {
1601/// # variant: u8,
1602/// # /*
1603/// ...
1604/// # */
1605/// }
1606/// ```
1607///
1608/// This derive ensures that the runtime check of whether bytes correspond to a
1609/// valid instance is sound. You **must** implement this trait via the derive.
1610///
1611/// # What is a "valid instance"?
1612///
1613/// In Rust, each type has *bit validity*, which refers to the set of bit
1614/// patterns which may appear in an instance of that type. It is impossible for
1615/// safe Rust code to produce values which violate bit validity (ie, values
1616/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1617/// invalid value, this is considered [undefined behavior].
1618///
1619/// Rust's bit validity rules are currently being decided, which means that some
1620/// types have three classes of bit patterns: those which are definitely valid,
1621/// and whose validity is documented in the language; those which may or may not
1622/// be considered valid at some point in the future; and those which are
1623/// definitely invalid.
1624///
1625/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1626/// be valid if its validity is a documented guarantee provided by the
1627/// language.
1628///
1629/// For most use cases, Rust's current guarantees align with programmers'
1630/// intuitions about what ought to be valid. As a result, zerocopy's
1631/// conservatism should not affect most users.
1632///
1633/// If you are negatively affected by lack of support for a particular type,
1634/// we encourage you to let us know by [filing an issue][github-repo].
1635///
1636/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1637///
1638/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1639/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1640/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1641/// IntoBytes`, there exist values of `t: T` such that
1642/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1643/// generally assume that values produced by `IntoBytes` will necessarily be
1644/// accepted as valid by `TryFromBytes`.
1645///
1646/// # Safety
1647///
1648/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1649/// or representation of `T`. It merely provides the ability to perform a
1650/// validity check at runtime via methods like [`try_ref_from_bytes`].
1651///
1652/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1653/// Future releases of zerocopy may make backwards-breaking changes to these
1654/// items, including changes that only affect soundness, which may cause code
1655/// which uses those items to silently become unsound.
1656///
1657/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1658/// [github-repo]: https://github.com/google/zerocopy
1659/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1660/// [*valid instance*]: #what-is-a-valid-instance
1661#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1662#[cfg_attr(
1663 not(feature = "derive"),
1664 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1665)]
1666#[cfg_attr(
1667 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1668 diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1669)]
1670pub unsafe trait TryFromBytes {
    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
    // safe.
    //
    // As the name states, this unimplementable-by-hand required method exists
    // to steer users towards `#[derive(TryFromBytes)]` rather than writing
    // manual (and potentially unsound) impls.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1677
1678 /// Does a given memory range contain a valid instance of `Self`?
1679 ///
1680 /// # Safety
1681 ///
1682 /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
1683 /// `*candidate` contains a valid `Self`.
1684 ///
1685 /// # Panics
1686 ///
1687 /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
1688 /// `unsafe` code remains sound even in the face of `is_bit_valid`
1689 /// panicking. (We support user-defined validation routines; so long as
1690 /// these routines are not required to be `unsafe`, there is no way to
1691 /// ensure that these do not generate panics.)
1692 ///
1693 /// Besides user-defined validation routines panicking, `is_bit_valid` will
1694 /// either panic or fail to compile if called on a pointer with [`Shared`]
1695 /// aliasing when `Self: !Immutable`.
1696 ///
1697 /// [`UnsafeCell`]: core::cell::UnsafeCell
1698 /// [`Shared`]: invariant::Shared
    #[doc(hidden)]
    // Per the `# Safety` section above: `candidate` points to a
    // possibly-invalid `Self`; a `true` return promises to unsafe callers that
    // `*candidate` is a valid `Self`. Implementations must therefore not
    // assume bit validity of the referent before checking it.
    fn is_bit_valid(candidate: Maybe<'_, Self>) -> bool;
1701
1702 /// Attempts to interpret the given `source` as a `&Self`.
1703 ///
1704 /// If the bytes of `source` are a valid instance of `Self`, this method
1705 /// returns a reference to those bytes interpreted as a `Self`. If the
1706 /// length of `source` is not a [valid size of `Self`][valid-size], or if
1707 /// `source` is not appropriately aligned, or if `source` is not a valid
1708 /// instance of `Self`, this returns `Err`. If [`Self:
1709 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1710 /// error][ConvertError::from].
1711 ///
1712 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1713 ///
1714 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1715 /// [self-unaligned]: Unaligned
1716 /// [slice-dst]: KnownLayout#dynamically-sized-types
1717 ///
1718 /// # Compile-Time Assertions
1719 ///
1720 /// This method cannot yet be used on unsized types whose dynamically-sized
1721 /// component is zero-sized. Attempting to use this method on such types
1722 /// results in a compile-time assertion error; e.g.:
1723 ///
1724 /// ```compile_fail,E0080
1725 /// use zerocopy::*;
1726 /// # use zerocopy_derive::*;
1727 ///
1728 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1729 /// #[repr(C)]
1730 /// struct ZSTy {
1731 /// leading_sized: u16,
1732 /// trailing_dst: [()],
1733 /// }
1734 ///
1735 /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // âš Compile Error!
1736 /// ```
1737 ///
1738 /// # Examples
1739 ///
1740 /// ```
1741 /// use zerocopy::TryFromBytes;
1742 /// # use zerocopy_derive::*;
1743 ///
1744 /// // The only valid value of this type is the byte `0xC0`
1745 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1746 /// #[repr(u8)]
1747 /// enum C0 { xC0 = 0xC0 }
1748 ///
1749 /// // The only valid value of this type is the byte sequence `0xC0C0`.
1750 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1751 /// #[repr(C)]
1752 /// struct C0C0(C0, C0);
1753 ///
1754 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1755 /// #[repr(C)]
1756 /// struct Packet {
1757 /// magic_number: C0C0,
1758 /// mug_size: u8,
1759 /// temperature: u8,
1760 /// marshmallows: [[u8; 2]],
1761 /// }
1762 ///
1763 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1764 ///
1765 /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1766 ///
1767 /// assert_eq!(packet.mug_size, 240);
1768 /// assert_eq!(packet.temperature, 77);
1769 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1770 ///
    /// // These bytes are not a valid instance of `Packet`.
1772 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1773 /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1774 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Size- and alignment-checked cast of the entire byte slice ("no
        // leftover" bytes are permitted). The `None` metadata argument asks
        // for the trailing-slice element count (if any) to be inferred from
        // `source`'s length.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(valid) => Ok(valid.as_ref()),
                    // Validity error: recover the original `&[u8]` so the
                    // error can return the caller's bytes.
                    Err(e) => {
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size or alignment error: convert the byte-slice `Ptr` back into
            // an ordinary `&[u8]` for the error value.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
1797
1798 /// Attempts to interpret the prefix of the given `source` as a `&Self`.
1799 ///
1800 /// This method computes the [largest possible size of `Self`][valid-size]
1801 /// that can fit in the leading bytes of `source`. If that prefix is a valid
1802 /// instance of `Self`, this method returns a reference to those bytes
1803 /// interpreted as `Self`, and a reference to the remaining bytes. If there
1804 /// are insufficient bytes, or if `source` is not appropriately aligned, or
1805 /// if those bytes are not a valid instance of `Self`, this returns `Err`.
1806 /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1807 /// alignment error][ConvertError::from].
1808 ///
1809 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1810 ///
1811 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1812 /// [self-unaligned]: Unaligned
1813 /// [slice-dst]: KnownLayout#dynamically-sized-types
1814 ///
1815 /// # Compile-Time Assertions
1816 ///
1817 /// This method cannot yet be used on unsized types whose dynamically-sized
1818 /// component is zero-sized. Attempting to use this method on such types
1819 /// results in a compile-time assertion error; e.g.:
1820 ///
1821 /// ```compile_fail,E0080
1822 /// use zerocopy::*;
1823 /// # use zerocopy_derive::*;
1824 ///
1825 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1826 /// #[repr(C)]
1827 /// struct ZSTy {
1828 /// leading_sized: u16,
1829 /// trailing_dst: [()],
1830 /// }
1831 ///
1832 /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // âš Compile Error!
1833 /// ```
1834 ///
1835 /// # Examples
1836 ///
1837 /// ```
1838 /// use zerocopy::TryFromBytes;
1839 /// # use zerocopy_derive::*;
1840 ///
1841 /// // The only valid value of this type is the byte `0xC0`
1842 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1843 /// #[repr(u8)]
1844 /// enum C0 { xC0 = 0xC0 }
1845 ///
1846 /// // The only valid value of this type is the bytes `0xC0C0`.
1847 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1848 /// #[repr(C)]
1849 /// struct C0C0(C0, C0);
1850 ///
1851 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1852 /// #[repr(C)]
1853 /// struct Packet {
1854 /// magic_number: C0C0,
1855 /// mug_size: u8,
1856 /// temperature: u8,
1857 /// marshmallows: [[u8; 2]],
1858 /// }
1859 ///
1860 /// // These are more bytes than are needed to encode a `Packet`.
1861 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1862 ///
1863 /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
1864 ///
1865 /// assert_eq!(packet.mug_size, 240);
1866 /// assert_eq!(packet.temperature, 77);
1867 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1868 /// assert_eq!(suffix, &[6u8][..]);
1869 ///
    /// // These bytes are not a valid instance of `Packet`.
1871 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1872 /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
1873 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper. `None` means the DST
        // element count is not fixed by the caller; per the docs above, the
        // largest prefix that fits is taken.
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1883
1884 /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1885 ///
1886 /// This method computes the [largest possible size of `Self`][valid-size]
1887 /// that can fit in the trailing bytes of `source`. If that suffix is a
1888 /// valid instance of `Self`, this method returns a reference to those bytes
1889 /// interpreted as `Self`, and a reference to the preceding bytes. If there
1890 /// are insufficient bytes, or if the suffix of `source` would not be
1891 /// appropriately aligned, or if the suffix is not a valid instance of
1892 /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1893 /// can [infallibly discard the alignment error][ConvertError::from].
1894 ///
1895 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1896 ///
1897 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1898 /// [self-unaligned]: Unaligned
1899 /// [slice-dst]: KnownLayout#dynamically-sized-types
1900 ///
1901 /// # Compile-Time Assertions
1902 ///
1903 /// This method cannot yet be used on unsized types whose dynamically-sized
1904 /// component is zero-sized. Attempting to use this method on such types
1905 /// results in a compile-time assertion error; e.g.:
1906 ///
1907 /// ```compile_fail,E0080
1908 /// use zerocopy::*;
1909 /// # use zerocopy_derive::*;
1910 ///
1911 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1912 /// #[repr(C)]
1913 /// struct ZSTy {
1914 /// leading_sized: u16,
1915 /// trailing_dst: [()],
1916 /// }
1917 ///
1918 /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // âš Compile Error!
1919 /// ```
1920 ///
1921 /// # Examples
1922 ///
1923 /// ```
1924 /// use zerocopy::TryFromBytes;
1925 /// # use zerocopy_derive::*;
1926 ///
1927 /// // The only valid value of this type is the byte `0xC0`
1928 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1929 /// #[repr(u8)]
1930 /// enum C0 { xC0 = 0xC0 }
1931 ///
1932 /// // The only valid value of this type is the bytes `0xC0C0`.
1933 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1934 /// #[repr(C)]
1935 /// struct C0C0(C0, C0);
1936 ///
1937 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1938 /// #[repr(C)]
1939 /// struct Packet {
1940 /// magic_number: C0C0,
1941 /// mug_size: u8,
1942 /// temperature: u8,
1943 /// marshmallows: [[u8; 2]],
1944 /// }
1945 ///
1946 /// // These are more bytes than are needed to encode a `Packet`.
1947 /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1948 ///
1949 /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1950 ///
1951 /// assert_eq!(packet.mug_size, 240);
1952 /// assert_eq!(packet.temperature, 77);
1953 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1954 /// assert_eq!(prefix, &[0u8][..]);
1955 ///
    /// // These bytes are not a valid instance of `Packet`.
1957 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1958 /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1959 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(Self, remainder)`; `swap` reorders the pair to
        // this method's documented `(prefix, Self)` order.
        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
1969
1970 /// Attempts to interpret the given `source` as a `&mut Self` without
1971 /// copying.
1972 ///
1973 /// If the bytes of `source` are a valid instance of `Self`, this method
1974 /// returns a reference to those bytes interpreted as a `Self`. If the
1975 /// length of `source` is not a [valid size of `Self`][valid-size], or if
1976 /// `source` is not appropriately aligned, or if `source` is not a valid
1977 /// instance of `Self`, this returns `Err`. If [`Self:
1978 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1979 /// error][ConvertError::from].
1980 ///
1981 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1982 ///
1983 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1984 /// [self-unaligned]: Unaligned
1985 /// [slice-dst]: KnownLayout#dynamically-sized-types
1986 ///
1987 /// # Compile-Time Assertions
1988 ///
1989 /// This method cannot yet be used on unsized types whose dynamically-sized
1990 /// component is zero-sized. Attempting to use this method on such types
1991 /// results in a compile-time assertion error; e.g.:
1992 ///
1993 /// ```compile_fail,E0080
1994 /// use zerocopy::*;
1995 /// # use zerocopy_derive::*;
1996 ///
1997 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1998 /// #[repr(C, packed)]
1999 /// struct ZSTy {
2000 /// leading_sized: [u8; 2],
2001 /// trailing_dst: [()],
2002 /// }
2003 ///
2004 /// let mut source = [85, 85];
2005 /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš Compile Error!
2006 /// ```
2007 ///
2008 /// # Examples
2009 ///
2010 /// ```
2011 /// use zerocopy::TryFromBytes;
2012 /// # use zerocopy_derive::*;
2013 ///
2014 /// // The only valid value of this type is the byte `0xC0`
2015 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2016 /// #[repr(u8)]
2017 /// enum C0 { xC0 = 0xC0 }
2018 ///
2019 /// // The only valid value of this type is the bytes `0xC0C0`.
2020 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2021 /// #[repr(C)]
2022 /// struct C0C0(C0, C0);
2023 ///
2024 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2025 /// #[repr(C, packed)]
2026 /// struct Packet {
2027 /// magic_number: C0C0,
2028 /// mug_size: u8,
2029 /// temperature: u8,
2030 /// marshmallows: [[u8; 2]],
2031 /// }
2032 ///
2033 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
2034 ///
2035 /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
2036 ///
2037 /// assert_eq!(packet.mug_size, 240);
2038 /// assert_eq!(packet.temperature, 77);
2039 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2040 ///
2041 /// packet.temperature = 111;
2042 ///
2043 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
2044 ///
    /// // These bytes are not a valid instance of `Packet`.
2046 /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2047 /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
2048 /// ```
2049 #[must_use = "has no side effects"]
2050 #[inline]
2051 fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2052 where
2053 Self: KnownLayout + IntoBytes,
2054 {
2055 static_assert_dst_is_not_zst!(Self);
2056 match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
2057 Ok(source) => {
2058 // This call may panic. If that happens, it doesn't cause any soundness
2059 // issues, as we have not generated any invalid state which we need to
2060 // fix before returning.
2061 match source.try_into_valid() {
2062 Ok(source) => Ok(source.as_mut()),
2063 Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2064 }
2065 }
2066 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2067 }
2068 }
2069
2070 /// Attempts to interpret the prefix of the given `source` as a `&mut
2071 /// Self`.
2072 ///
2073 /// This method computes the [largest possible size of `Self`][valid-size]
2074 /// that can fit in the leading bytes of `source`. If that prefix is a valid
2075 /// instance of `Self`, this method returns a reference to those bytes
2076 /// interpreted as `Self`, and a reference to the remaining bytes. If there
2077 /// are insufficient bytes, or if `source` is not appropriately aligned, or
2078 /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
2079 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
2080 /// alignment error][ConvertError::from].
2081 ///
2082 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2083 ///
2084 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2085 /// [self-unaligned]: Unaligned
2086 /// [slice-dst]: KnownLayout#dynamically-sized-types
2087 ///
2088 /// # Compile-Time Assertions
2089 ///
2090 /// This method cannot yet be used on unsized types whose dynamically-sized
2091 /// component is zero-sized. Attempting to use this method on such types
2092 /// results in a compile-time assertion error; e.g.:
2093 ///
2094 /// ```compile_fail,E0080
2095 /// use zerocopy::*;
2096 /// # use zerocopy_derive::*;
2097 ///
2098 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2099 /// #[repr(C, packed)]
2100 /// struct ZSTy {
2101 /// leading_sized: [u8; 2],
2102 /// trailing_dst: [()],
2103 /// }
2104 ///
2105 /// let mut source = [85, 85];
2106 /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // âš Compile Error!
2107 /// ```
2108 ///
2109 /// # Examples
2110 ///
2111 /// ```
2112 /// use zerocopy::TryFromBytes;
2113 /// # use zerocopy_derive::*;
2114 ///
2115 /// // The only valid value of this type is the byte `0xC0`
2116 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2117 /// #[repr(u8)]
2118 /// enum C0 { xC0 = 0xC0 }
2119 ///
2120 /// // The only valid value of this type is the bytes `0xC0C0`.
2121 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2122 /// #[repr(C)]
2123 /// struct C0C0(C0, C0);
2124 ///
2125 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2126 /// #[repr(C, packed)]
2127 /// struct Packet {
2128 /// magic_number: C0C0,
2129 /// mug_size: u8,
2130 /// temperature: u8,
2131 /// marshmallows: [[u8; 2]],
2132 /// }
2133 ///
2134 /// // These are more bytes than are needed to encode a `Packet`.
2135 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2136 ///
2137 /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
2138 ///
2139 /// assert_eq!(packet.mug_size, 240);
2140 /// assert_eq!(packet.temperature, 77);
2141 /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2142 /// assert_eq!(suffix, &[6u8][..]);
2143 ///
2144 /// packet.temperature = 111;
2145 /// suffix[0] = 222;
2146 ///
2147 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
2148 ///
    /// // These bytes are not a valid instance of `Packet`.
2150 /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2151 /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
2152 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared mutable prefix/suffix helper. `None` means
        // the DST element count is not fixed by the caller; per the docs
        // above, the largest prefix that fits is taken.
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
2164
2165 /// Attempts to interpret the suffix of the given `source` as a `&mut
2166 /// Self`.
2167 ///
2168 /// This method computes the [largest possible size of `Self`][valid-size]
2169 /// that can fit in the trailing bytes of `source`. If that suffix is a
2170 /// valid instance of `Self`, this method returns a reference to those bytes
2171 /// interpreted as `Self`, and a reference to the preceding bytes. If there
2172 /// are insufficient bytes, or if the suffix of `source` would not be
2173 /// appropriately aligned, or if the suffix is not a valid instance of
2174 /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
2175 /// can [infallibly discard the alignment error][ConvertError::from].
2176 ///
2177 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2178 ///
2179 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2180 /// [self-unaligned]: Unaligned
2181 /// [slice-dst]: KnownLayout#dynamically-sized-types
2182 ///
2183 /// # Compile-Time Assertions
2184 ///
2185 /// This method cannot yet be used on unsized types whose dynamically-sized
2186 /// component is zero-sized. Attempting to use this method on such types
2187 /// results in a compile-time assertion error; e.g.:
2188 ///
2189 /// ```compile_fail,E0080
2190 /// use zerocopy::*;
2191 /// # use zerocopy_derive::*;
2192 ///
2193 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2194 /// #[repr(C, packed)]
2195 /// struct ZSTy {
2196 /// leading_sized: u16,
2197 /// trailing_dst: [()],
2198 /// }
2199 ///
2200 /// let mut source = [85, 85];
2201 /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // âš Compile Error!
2202 /// ```
2203 ///
2204 /// # Examples
2205 ///
2206 /// ```
2207 /// use zerocopy::TryFromBytes;
2208 /// # use zerocopy_derive::*;
2209 ///
2210 /// // The only valid value of this type is the byte `0xC0`
2211 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2212 /// #[repr(u8)]
2213 /// enum C0 { xC0 = 0xC0 }
2214 ///
2215 /// // The only valid value of this type is the bytes `0xC0C0`.
2216 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2217 /// #[repr(C)]
2218 /// struct C0C0(C0, C0);
2219 ///
2220 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2221 /// #[repr(C, packed)]
2222 /// struct Packet {
2223 /// magic_number: C0C0,
2224 /// mug_size: u8,
2225 /// temperature: u8,
2226 /// marshmallows: [[u8; 2]],
2227 /// }
2228 ///
2229 /// // These are more bytes than are needed to encode a `Packet`.
2230 /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2231 ///
2232 /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
2233 ///
2234 /// assert_eq!(packet.mug_size, 240);
2235 /// assert_eq!(packet.temperature, 77);
2236 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2237 /// assert_eq!(prefix, &[0u8][..]);
2238 ///
2239 /// prefix[0] = 111;
2240 /// packet.temperature = 222;
2241 ///
2242 /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2243 ///
    /// // These bytes are not a valid instance of `Packet`.
2245 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2246 /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2247 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(Self, remainder)`; `swap` reorders the pair to
        // this method's documented `(prefix, Self)` order.
        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2259
2260 /// Attempts to interpret the given `source` as a `&Self` with a DST length
2261 /// equal to `count`.
2262 ///
2263 /// This method attempts to return a reference to `source` interpreted as a
2264 /// `Self` with `count` trailing elements. If the length of `source` is not
2265 /// equal to the size of `Self` with `count` elements, if `source` is not
2266 /// appropriately aligned, or if `source` does not contain a valid instance
2267 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2268 /// you can [infallibly discard the alignment error][ConvertError::from].
2269 ///
2270 /// [self-unaligned]: Unaligned
2271 /// [slice-dst]: KnownLayout#dynamically-sized-types
2272 ///
2273 /// # Examples
2274 ///
2275 /// ```
2276 /// # #![allow(non_camel_case_types)] // For C0::xC0
2277 /// use zerocopy::TryFromBytes;
2278 /// # use zerocopy_derive::*;
2279 ///
2280 /// // The only valid value of this type is the byte `0xC0`
2281 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2282 /// #[repr(u8)]
2283 /// enum C0 { xC0 = 0xC0 }
2284 ///
2285 /// // The only valid value of this type is the bytes `0xC0C0`.
2286 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2287 /// #[repr(C)]
2288 /// struct C0C0(C0, C0);
2289 ///
2290 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2291 /// #[repr(C)]
2292 /// struct Packet {
2293 /// magic_number: C0C0,
2294 /// mug_size: u8,
2295 /// temperature: u8,
2296 /// marshmallows: [[u8; 2]],
2297 /// }
2298 ///
2299 /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2300 ///
2301 /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2302 ///
2303 /// assert_eq!(packet.mug_size, 240);
2304 /// assert_eq!(packet.temperature, 77);
2305 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2306 ///
    /// // These bytes are not a valid instance of `Packet`.
2308 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2309 /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2310 /// ```
2311 ///
2312 /// Since an explicit `count` is provided, this method supports types with
2313 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2314 /// which do not take an explicit count do not support such types.
2315 ///
2316 /// ```
2317 /// use core::num::NonZeroU16;
2318 /// use zerocopy::*;
2319 /// # use zerocopy_derive::*;
2320 ///
2321 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2322 /// #[repr(C)]
2323 /// struct ZSTy {
2324 /// leading_sized: NonZeroU16,
2325 /// trailing_dst: [()],
2326 /// }
2327 ///
2328 /// let src = 0xCAFEu16.as_bytes();
2329 /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2330 /// assert_eq!(zsty.trailing_dst.len(), 42);
2331 /// ```
2332 ///
2333 /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Note: unlike `try_ref_from_bytes`, there is deliberately no
        // `static_assert_dst_is_not_zst!` here — per the docs above, the
        // explicit `count` makes zero-sized trailing slice elements
        // unambiguous, so such types are supported. The cast is size- and
        // alignment-checked, with the element count fixed to `Some(count)`.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_ref()),
                    // Validity error: recover the original `&[u8]` so the
                    // error can return the caller's bytes.
                    Err(e) => {
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size or alignment error: convert the byte-slice `Ptr` back into
            // an ordinary `&[u8]` for the error value.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
2359
2360 /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2361 /// a DST length equal to `count`.
2362 ///
2363 /// This method attempts to return a reference to the prefix of `source`
2364 /// interpreted as a `Self` with `count` trailing elements, and a reference
2365 /// to the remaining bytes. If the length of `source` is less than the size
2366 /// of `Self` with `count` elements, if `source` is not appropriately
2367 /// aligned, or if the prefix of `source` does not contain a valid instance
2368 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2369 /// you can [infallibly discard the alignment error][ConvertError::from].
2370 ///
2371 /// [self-unaligned]: Unaligned
2372 /// [slice-dst]: KnownLayout#dynamically-sized-types
2373 ///
2374 /// # Examples
2375 ///
2376 /// ```
2377 /// # #![allow(non_camel_case_types)] // For C0::xC0
2378 /// use zerocopy::TryFromBytes;
2379 /// # use zerocopy_derive::*;
2380 ///
2381 /// // The only valid value of this type is the byte `0xC0`
2382 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2383 /// #[repr(u8)]
2384 /// enum C0 { xC0 = 0xC0 }
2385 ///
2386 /// // The only valid value of this type is the bytes `0xC0C0`.
2387 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2388 /// #[repr(C)]
2389 /// struct C0C0(C0, C0);
2390 ///
2391 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2392 /// #[repr(C)]
2393 /// struct Packet {
2394 /// magic_number: C0C0,
2395 /// mug_size: u8,
2396 /// temperature: u8,
2397 /// marshmallows: [[u8; 2]],
2398 /// }
2399 ///
2400 /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2401 ///
2402 /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2403 ///
2404 /// assert_eq!(packet.mug_size, 240);
2405 /// assert_eq!(packet.temperature, 77);
2406 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2407 /// assert_eq!(suffix, &[8u8][..]);
2408 ///
    /// // These bytes are not a valid instance of `Packet`.
2410 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2411 /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2412 /// ```
2413 ///
2414 /// Since an explicit `count` is provided, this method supports types with
2415 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2416 /// which do not take an explicit count do not support such types.
2417 ///
2418 /// ```
2419 /// use core::num::NonZeroU16;
2420 /// use zerocopy::*;
2421 /// # use zerocopy_derive::*;
2422 ///
2423 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2424 /// #[repr(C)]
2425 /// struct ZSTy {
2426 /// leading_sized: NonZeroU16,
2427 /// trailing_dst: [()],
2428 /// }
2429 ///
2430 /// let src = 0xCAFEu16.as_bytes();
2431 /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2432 /// assert_eq!(zsty.trailing_dst.len(), 42);
2433 /// ```
2434 ///
2435 /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Delegate to the shared prefix/suffix helper with the element count
        // fixed to `Some(count)`. No ZST static assert is needed: the
        // explicit count supports zero-sized trailing slice elements.
        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2447
2448 /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2449 /// a DST length equal to `count`.
2450 ///
2451 /// This method attempts to return a reference to the suffix of `source`
2452 /// interpreted as a `Self` with `count` trailing elements, and a reference
2453 /// to the preceding bytes. If the length of `source` is less than the size
2454 /// of `Self` with `count` elements, if the suffix of `source` is not
2455 /// appropriately aligned, or if the suffix of `source` does not contain a
2456 /// valid instance of `Self`, this returns `Err`. If [`Self:
2457 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2458 /// error][ConvertError::from].
2459 ///
2460 /// [self-unaligned]: Unaligned
2461 /// [slice-dst]: KnownLayout#dynamically-sized-types
2462 ///
2463 /// # Examples
2464 ///
2465 /// ```
2466 /// # #![allow(non_camel_case_types)] // For C0::xC0
2467 /// use zerocopy::TryFromBytes;
2468 /// # use zerocopy_derive::*;
2469 ///
2470 /// // The only valid value of this type is the byte `0xC0`
2471 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2472 /// #[repr(u8)]
2473 /// enum C0 { xC0 = 0xC0 }
2474 ///
2475 /// // The only valid value of this type is the bytes `0xC0C0`.
2476 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2477 /// #[repr(C)]
2478 /// struct C0C0(C0, C0);
2479 ///
2480 /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2481 /// #[repr(C)]
2482 /// struct Packet {
2483 /// magic_number: C0C0,
2484 /// mug_size: u8,
2485 /// temperature: u8,
2486 /// marshmallows: [[u8; 2]],
2487 /// }
2488 ///
2489 /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2490 ///
2491 /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2492 ///
2493 /// assert_eq!(packet.mug_size, 240);
2494 /// assert_eq!(packet.temperature, 77);
2495 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2496 /// assert_eq!(prefix, &[123u8][..]);
2497 ///
    /// // These bytes are not a valid instance of `Packet`.
2499 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2500 /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2501 /// ```
2502 ///
2503 /// Since an explicit `count` is provided, this method supports types with
2504 /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2505 /// which do not take an explicit count do not support such types.
2506 ///
2507 /// ```
2508 /// use core::num::NonZeroU16;
2509 /// use zerocopy::*;
2510 /// # use zerocopy_derive::*;
2511 ///
2512 /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2513 /// #[repr(C)]
2514 /// struct ZSTy {
2515 /// leading_sized: NonZeroU16,
2516 /// trailing_dst: [()],
2517 /// }
2518 ///
2519 /// let src = 0xCAFEu16.as_bytes();
2520 /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2521 /// assert_eq!(zsty.trailing_dst.len(), 42);
2522 /// ```
2523 ///
2524 /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2525 #[must_use = "has no side effects"]
2526 #[inline]
2527 fn try_ref_from_suffix_with_elems(
2528 source: &[u8],
2529 count: usize,
2530 ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2531 where
2532 Self: KnownLayout<PointerMetadata = usize> + Immutable,
2533 {
2534 try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2535 }
2536
2537 /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2538 /// length equal to `count`.
2539 ///
2540 /// This method attempts to return a reference to `source` interpreted as a
2541 /// `Self` with `count` trailing elements. If the length of `source` is not
2542 /// equal to the size of `Self` with `count` elements, if `source` is not
2543 /// appropriately aligned, or if `source` does not contain a valid instance
2544 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2545 /// you can [infallibly discard the alignment error][ConvertError::from].
2546 ///
2547 /// [self-unaligned]: Unaligned
2548 /// [slice-dst]: KnownLayout#dynamically-sized-types
2549 ///
2550 /// # Examples
2551 ///
2552 /// ```
2553 /// # #![allow(non_camel_case_types)] // For C0::xC0
2554 /// use zerocopy::TryFromBytes;
2555 /// # use zerocopy_derive::*;
2556 ///
2557 /// // The only valid value of this type is the byte `0xC0`
2558 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2559 /// #[repr(u8)]
2560 /// enum C0 { xC0 = 0xC0 }
2561 ///
2562 /// // The only valid value of this type is the bytes `0xC0C0`.
2563 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2564 /// #[repr(C)]
2565 /// struct C0C0(C0, C0);
2566 ///
2567 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2568 /// #[repr(C, packed)]
2569 /// struct Packet {
2570 /// magic_number: C0C0,
2571 /// mug_size: u8,
2572 /// temperature: u8,
2573 /// marshmallows: [[u8; 2]],
2574 /// }
2575 ///
2576 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2577 ///
2578 /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2579 ///
2580 /// assert_eq!(packet.mug_size, 240);
2581 /// assert_eq!(packet.temperature, 77);
2582 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2583 ///
2584 /// packet.temperature = 111;
2585 ///
2586 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2587 ///
    /// // These bytes are not a valid instance of `Packet`.
2589 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2590 /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2591 /// ```
2592 ///
2593 /// Since an explicit `count` is provided, this method supports types with
2594 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2595 /// which do not take an explicit count do not support such types.
2596 ///
2597 /// ```
2598 /// use core::num::NonZeroU16;
2599 /// use zerocopy::*;
2600 /// # use zerocopy_derive::*;
2601 ///
2602 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2603 /// #[repr(C, packed)]
2604 /// struct ZSTy {
2605 /// leading_sized: NonZeroU16,
2606 /// trailing_dst: [()],
2607 /// }
2608 ///
2609 /// let mut src = 0xCAFEu16;
2610 /// let src = src.as_mut_bytes();
2611 /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2612 /// assert_eq!(zsty.trailing_dst.len(), 42);
2613 /// ```
2614 ///
2615 /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2616 #[must_use = "has no side effects"]
2617 #[inline]
2618 fn try_mut_from_bytes_with_elems(
2619 source: &mut [u8],
2620 count: usize,
2621 ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2622 where
2623 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2624 {
2625 match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
2626 {
2627 Ok(source) => {
2628 // This call may panic. If that happens, it doesn't cause any soundness
2629 // issues, as we have not generated any invalid state which we need to
2630 // fix before returning.
2631 match source.try_into_valid() {
2632 Ok(source) => Ok(source.as_mut()),
2633 Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2634 }
2635 }
2636 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2637 }
2638 }
2639
2640 /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2641 /// with a DST length equal to `count`.
2642 ///
2643 /// This method attempts to return a reference to the prefix of `source`
2644 /// interpreted as a `Self` with `count` trailing elements, and a reference
2645 /// to the remaining bytes. If the length of `source` is less than the size
2646 /// of `Self` with `count` elements, if `source` is not appropriately
2647 /// aligned, or if the prefix of `source` does not contain a valid instance
2648 /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2649 /// you can [infallibly discard the alignment error][ConvertError::from].
2650 ///
2651 /// [self-unaligned]: Unaligned
2652 /// [slice-dst]: KnownLayout#dynamically-sized-types
2653 ///
2654 /// # Examples
2655 ///
2656 /// ```
2657 /// # #![allow(non_camel_case_types)] // For C0::xC0
2658 /// use zerocopy::TryFromBytes;
2659 /// # use zerocopy_derive::*;
2660 ///
2661 /// // The only valid value of this type is the byte `0xC0`
2662 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2663 /// #[repr(u8)]
2664 /// enum C0 { xC0 = 0xC0 }
2665 ///
2666 /// // The only valid value of this type is the bytes `0xC0C0`.
2667 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2668 /// #[repr(C)]
2669 /// struct C0C0(C0, C0);
2670 ///
2671 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2672 /// #[repr(C, packed)]
2673 /// struct Packet {
2674 /// magic_number: C0C0,
2675 /// mug_size: u8,
2676 /// temperature: u8,
2677 /// marshmallows: [[u8; 2]],
2678 /// }
2679 ///
2680 /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2681 ///
2682 /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2683 ///
2684 /// assert_eq!(packet.mug_size, 240);
2685 /// assert_eq!(packet.temperature, 77);
2686 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2687 /// assert_eq!(suffix, &[8u8][..]);
2688 ///
2689 /// packet.temperature = 111;
2690 /// suffix[0] = 222;
2691 ///
2692 /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2693 ///
    /// // These bytes are not a valid instance of `Packet`.
2695 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2696 /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2697 /// ```
2698 ///
2699 /// Since an explicit `count` is provided, this method supports types with
2700 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2701 /// which do not take an explicit count do not support such types.
2702 ///
2703 /// ```
2704 /// use core::num::NonZeroU16;
2705 /// use zerocopy::*;
2706 /// # use zerocopy_derive::*;
2707 ///
2708 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2709 /// #[repr(C, packed)]
2710 /// struct ZSTy {
2711 /// leading_sized: NonZeroU16,
2712 /// trailing_dst: [()],
2713 /// }
2714 ///
2715 /// let mut src = 0xCAFEu16;
2716 /// let src = src.as_mut_bytes();
2717 /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2718 /// assert_eq!(zsty.trailing_dst.len(), 42);
2719 /// ```
2720 ///
2721 /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Delegate to the shared prefix/suffix helper, which performs the cast
        // and bit-validity check; it already returns `(Self, suffix)` in the
        // documented order, so no post-processing is needed here.
        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2733
2734 /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2735 /// with a DST length equal to `count`.
2736 ///
2737 /// This method attempts to return a reference to the suffix of `source`
2738 /// interpreted as a `Self` with `count` trailing elements, and a reference
2739 /// to the preceding bytes. If the length of `source` is less than the size
2740 /// of `Self` with `count` elements, if the suffix of `source` is not
2741 /// appropriately aligned, or if the suffix of `source` does not contain a
2742 /// valid instance of `Self`, this returns `Err`. If [`Self:
2743 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2744 /// error][ConvertError::from].
2745 ///
2746 /// [self-unaligned]: Unaligned
2747 /// [slice-dst]: KnownLayout#dynamically-sized-types
2748 ///
2749 /// # Examples
2750 ///
2751 /// ```
2752 /// # #![allow(non_camel_case_types)] // For C0::xC0
2753 /// use zerocopy::TryFromBytes;
2754 /// # use zerocopy_derive::*;
2755 ///
2756 /// // The only valid value of this type is the byte `0xC0`
2757 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2758 /// #[repr(u8)]
2759 /// enum C0 { xC0 = 0xC0 }
2760 ///
2761 /// // The only valid value of this type is the bytes `0xC0C0`.
2762 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2763 /// #[repr(C)]
2764 /// struct C0C0(C0, C0);
2765 ///
2766 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2767 /// #[repr(C, packed)]
2768 /// struct Packet {
2769 /// magic_number: C0C0,
2770 /// mug_size: u8,
2771 /// temperature: u8,
2772 /// marshmallows: [[u8; 2]],
2773 /// }
2774 ///
2775 /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2776 ///
2777 /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2778 ///
2779 /// assert_eq!(packet.mug_size, 240);
2780 /// assert_eq!(packet.temperature, 77);
2781 /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2782 /// assert_eq!(prefix, &[123u8][..]);
2783 ///
2784 /// prefix[0] = 111;
2785 /// packet.temperature = 222;
2786 ///
2787 /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2788 ///
    /// // These bytes are not a valid instance of `Packet`.
2790 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2791 /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2792 /// ```
2793 ///
2794 /// Since an explicit `count` is provided, this method supports types with
2795 /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2796 /// which do not take an explicit count do not support such types.
2797 ///
2798 /// ```
2799 /// use core::num::NonZeroU16;
2800 /// use zerocopy::*;
2801 /// # use zerocopy_derive::*;
2802 ///
2803 /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2804 /// #[repr(C, packed)]
2805 /// struct ZSTy {
2806 /// leading_sized: NonZeroU16,
2807 /// trailing_dst: [()],
2808 /// }
2809 ///
2810 /// let mut src = 0xCAFEu16;
2811 /// let src = src.as_mut_bytes();
2812 /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2813 /// assert_eq!(zsty.trailing_dst.len(), 42);
2814 /// ```
2815 ///
2816 /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2817 #[must_use = "has no side effects"]
2818 #[inline]
2819 fn try_mut_from_suffix_with_elems(
2820 source: &mut [u8],
2821 count: usize,
2822 ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2823 where
2824 Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2825 {
2826 try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2827 }
2828
2829 /// Attempts to read the given `source` as a `Self`.
2830 ///
2831 /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2832 /// instance of `Self`, this returns `Err`.
2833 ///
2834 /// # Examples
2835 ///
2836 /// ```
2837 /// use zerocopy::TryFromBytes;
2838 /// # use zerocopy_derive::*;
2839 ///
2840 /// // The only valid value of this type is the byte `0xC0`
2841 /// #[derive(TryFromBytes)]
2842 /// #[repr(u8)]
2843 /// enum C0 { xC0 = 0xC0 }
2844 ///
2845 /// // The only valid value of this type is the bytes `0xC0C0`.
2846 /// #[derive(TryFromBytes)]
2847 /// #[repr(C)]
2848 /// struct C0C0(C0, C0);
2849 ///
2850 /// #[derive(TryFromBytes)]
2851 /// #[repr(C)]
2852 /// struct Packet {
2853 /// magic_number: C0C0,
2854 /// mug_size: u8,
2855 /// temperature: u8,
2856 /// }
2857 ///
2858 /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2859 ///
2860 /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2861 ///
2862 /// assert_eq!(packet.mug_size, 240);
2863 /// assert_eq!(packet.temperature, 77);
2864 ///
    /// // These bytes are not a valid instance of `Packet`.
2866 /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
2867 /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2868 /// ```
2869 #[must_use = "has no side effects"]
2870 #[inline]
2871 fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
2872 where
2873 Self: Sized,
2874 {
2875 let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
2876 Ok(candidate) => candidate,
2877 Err(e) => {
2878 return Err(TryReadError::Size(e.with_dst()));
2879 }
2880 };
2881 // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of
2882 // its bytes are initialized.
2883 unsafe { try_read_from(source, candidate) }
2884 }
2885
2886 /// Attempts to read a `Self` from the prefix of the given `source`.
2887 ///
2888 /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2889 /// of `source`, returning that `Self` and any remaining bytes. If
2890 /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2891 /// of `Self`, it returns `Err`.
2892 ///
2893 /// # Examples
2894 ///
2895 /// ```
2896 /// use zerocopy::TryFromBytes;
2897 /// # use zerocopy_derive::*;
2898 ///
2899 /// // The only valid value of this type is the byte `0xC0`
2900 /// #[derive(TryFromBytes)]
2901 /// #[repr(u8)]
2902 /// enum C0 { xC0 = 0xC0 }
2903 ///
2904 /// // The only valid value of this type is the bytes `0xC0C0`.
2905 /// #[derive(TryFromBytes)]
2906 /// #[repr(C)]
2907 /// struct C0C0(C0, C0);
2908 ///
2909 /// #[derive(TryFromBytes)]
2910 /// #[repr(C)]
2911 /// struct Packet {
2912 /// magic_number: C0C0,
2913 /// mug_size: u8,
2914 /// temperature: u8,
2915 /// }
2916 ///
2917 /// // These are more bytes than are needed to encode a `Packet`.
2918 /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2919 ///
2920 /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2921 ///
2922 /// assert_eq!(packet.mug_size, 240);
2923 /// assert_eq!(packet.temperature, 77);
2924 /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2925 ///
    /// // These bytes are not a valid instance of `Packet`.
2927 /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2928 /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2929 /// ```
2930 #[must_use = "has no side effects"]
2931 #[inline]
2932 fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
2933 where
2934 Self: Sized,
2935 {
2936 let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
2937 Ok(candidate) => candidate,
2938 Err(e) => {
2939 return Err(TryReadError::Size(e.with_dst()));
2940 }
2941 };
2942 // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of
2943 // its bytes are initialized.
2944 unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
2945 }
2946
2947 /// Attempts to read a `Self` from the suffix of the given `source`.
2948 ///
2949 /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2950 /// of `source`, returning that `Self` and any preceding bytes. If
2951 /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2952 /// of `Self`, it returns `Err`.
2953 ///
2954 /// # Examples
2955 ///
2956 /// ```
2957 /// # #![allow(non_camel_case_types)] // For C0::xC0
2958 /// use zerocopy::TryFromBytes;
2959 /// # use zerocopy_derive::*;
2960 ///
2961 /// // The only valid value of this type is the byte `0xC0`
2962 /// #[derive(TryFromBytes)]
2963 /// #[repr(u8)]
2964 /// enum C0 { xC0 = 0xC0 }
2965 ///
2966 /// // The only valid value of this type is the bytes `0xC0C0`.
2967 /// #[derive(TryFromBytes)]
2968 /// #[repr(C)]
2969 /// struct C0C0(C0, C0);
2970 ///
2971 /// #[derive(TryFromBytes)]
2972 /// #[repr(C)]
2973 /// struct Packet {
2974 /// magic_number: C0C0,
2975 /// mug_size: u8,
2976 /// temperature: u8,
2977 /// }
2978 ///
2979 /// // These are more bytes than are needed to encode a `Packet`.
2980 /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
2981 ///
2982 /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
2983 ///
2984 /// assert_eq!(packet.mug_size, 240);
2985 /// assert_eq!(packet.temperature, 77);
2986 /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
2987 ///
    /// // These bytes are not a valid instance of `Packet`.
2989 /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
2990 /// assert!(Packet::try_read_from_suffix(bytes).is_err());
2991 /// ```
2992 #[must_use = "has no side effects"]
2993 #[inline]
2994 fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
2995 where
2996 Self: Sized,
2997 {
2998 let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
2999 Ok(candidate) => candidate,
3000 Err(e) => {
3001 return Err(TryReadError::Size(e.with_dst()));
3002 }
3003 };
3004 // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of
3005 // its bytes are initialized.
3006 unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
3007 }
3008}
3009
3010#[inline(always)]
3011fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
3012 source: &[u8],
3013 cast_type: CastType,
3014 meta: Option<T::PointerMetadata>,
3015) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
3016 match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
3017 Ok((source, prefix_suffix)) => {
3018 // This call may panic. If that happens, it doesn't cause any soundness
3019 // issues, as we have not generated any invalid state which we need to
3020 // fix before returning.
3021 match source.try_into_valid() {
3022 Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
3023 Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
3024 }
3025 }
3026 Err(e) => Err(e.map_src(Ptr::as_ref).into()),
3027 }
3028}
3029
3030#[inline(always)]
3031fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
3032 candidate: &mut [u8],
3033 cast_type: CastType,
3034 meta: Option<T::PointerMetadata>,
3035) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
3036 match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
3037 Ok((candidate, prefix_suffix)) => {
3038 // This call may panic. If that happens, it doesn't cause any soundness
3039 // issues, as we have not generated any invalid state which we need to
3040 // fix before returning.
3041 match candidate.try_into_valid() {
3042 Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
3043 Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
3044 }
3045 }
3046 Err(e) => Err(e.map_src(Ptr::as_mut).into()),
3047 }
3048}
3049
/// Reverses the order of a two-element tuple.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
3054
/// Moves `candidate` out as a `T` if its bytes are a bit-valid instance of
/// `T`, returning a `Validity` error (carrying `source`) otherwise.
///
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: CoreMaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
    // `candidate`, which the caller promises is entirely initialized. Since
    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
    // no values written to an `Initialized` `c_ptr` can violate its validity.
    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
    // via `c_ptr` so long as it is live, so we don't need to worry about the
    // fact that `c_ptr` may have more restricted validity than `candidate`.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
    // Cast from `MaybeUninit<T>` to the referent type expected by the
    // `is_bit_valid` check below (the cast's target type is inferred from
    // that call).
    let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>();

    // Since we don't have `T: KnownLayout`, we hack around that by using
    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
    //
    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to fix
    // before returning.
    if !Wrapping::<T>::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) {
        return Err(ValidityError::new(source).into());
    }

    // Compile-time assertion that `T` and `Wrapping<T>` are transmutable in
    // both directions — i.e., that they have the same size and bit validity —
    // which justifies treating the `Wrapping<T>` check above as a check on
    // `T` itself.
    fn _assert_same_size_and_validity<T>()
    where
        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
    {
    }

    _assert_same_size_and_validity::<T>();

    // SAFETY: We just validated that `candidate` contains a valid
    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
    // guaranteed by the preceding type assertion.
    Ok(unsafe { candidate.assume_init() })
}
3100
3101/// Types for which a sequence of `0` bytes is a valid instance.
3102///
3103/// Any memory region of the appropriate length which is guaranteed to contain
3104/// only zero bytes can be viewed as any `FromZeros` type with no runtime
3105/// overhead. This is useful whenever memory is known to be in a zeroed state,
3106/// such memory returned from some allocation routines.
3107///
3108/// # Warning: Padding bytes
3109///
3110/// Note that, when a value is moved or copied, only the non-padding bytes of
3111/// that value are guaranteed to be preserved. It is unsound to assume that
3112/// values written to padding bytes are preserved after a move or copy. For more
3113/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
3114///
3115/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
3116///
3117/// # Implementation
3118///
3119/// **Do not implement this trait yourself!** Instead, use
3120/// [`#[derive(FromZeros)]`][derive]; e.g.:
3121///
3122/// ```
3123/// # use zerocopy_derive::{FromZeros, Immutable};
3124/// #[derive(FromZeros)]
3125/// struct MyStruct {
3126/// # /*
3127/// ...
3128/// # */
3129/// }
3130///
3131/// #[derive(FromZeros)]
3132/// #[repr(u8)]
3133/// enum MyEnum {
3134/// # Variant0,
3135/// # /*
3136/// ...
3137/// # */
3138/// }
3139///
3140/// #[derive(FromZeros, Immutable)]
3141/// union MyUnion {
3142/// # variant: u8,
3143/// # /*
3144/// ...
3145/// # */
3146/// }
3147/// ```
3148///
3149/// This derive performs a sophisticated, compile-time safety analysis to
3150/// determine whether a type is `FromZeros`.
3151///
3152/// # Safety
3153///
3154/// *This section describes what is required in order for `T: FromZeros`, and
3155/// what unsafe code may assume of such types. If you don't plan on implementing
3156/// `FromZeros` manually, and you don't plan on writing unsafe code that
3157/// operates on `FromZeros` types, then you don't need to read this section.*
3158///
3159/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
3160/// `T` whose bytes are all initialized to zero. If a type is marked as
3161/// `FromZeros` which violates this contract, it may cause undefined behavior.
3162///
3163/// `#[derive(FromZeros)]` only permits [types which satisfy these
3164/// requirements][derive-analysis].
3165///
3166#[cfg_attr(
3167 feature = "derive",
3168 doc = "[derive]: zerocopy_derive::FromZeros",
3169 doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
3170)]
3171#[cfg_attr(
3172 not(feature = "derive"),
3173 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
3174 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
3175)]
3176#[cfg_attr(
3177 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3178 diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
3179)]
3180pub unsafe trait FromZeros: TryFromBytes {
    // This hidden, required method ensures that hand-written impls are
    // impractical: only `#[derive(FromZeros)]`, which emits an
    // implementation of it, is expected to implement this trait.
    //
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3187
3188 /// Overwrites `self` with zeros.
3189 ///
3190 /// Sets every byte in `self` to 0. While this is similar to doing `*self =
3191 /// Self::new_zeroed()`, it differs in that `zero` does not semantically
3192 /// drop the current value and replace it with a new one — it simply
3193 /// modifies the bytes of the existing value.
3194 ///
3195 /// # Examples
3196 ///
3197 /// ```
3198 /// # use zerocopy::FromZeros;
3199 /// # use zerocopy_derive::*;
3200 /// #
3201 /// #[derive(FromZeros)]
3202 /// #[repr(C)]
3203 /// struct PacketHeader {
3204 /// src_port: [u8; 2],
3205 /// dst_port: [u8; 2],
3206 /// length: [u8; 2],
3207 /// checksum: [u8; 2],
3208 /// }
3209 ///
3210 /// let mut header = PacketHeader {
3211 /// src_port: 100u16.to_be_bytes(),
3212 /// dst_port: 200u16.to_be_bytes(),
3213 /// length: 300u16.to_be_bytes(),
3214 /// checksum: 400u16.to_be_bytes(),
3215 /// };
3216 ///
3217 /// header.zero();
3218 ///
3219 /// assert_eq!(header.src_port, [0, 0]);
3220 /// assert_eq!(header.dst_port, [0, 0]);
3221 /// assert_eq!(header.length, [0, 0]);
3222 /// assert_eq!(header.checksum, [0, 0]);
3223 /// ```
    #[inline(always)]
    fn zero(&mut self) {
        // Erase `Self`'s concrete type: we only need a raw byte pointer and
        // the referent's size (which `size_of_val` computes even for unsized
        // `Self`) in order to zero it.
        let slf: *mut Self = self;
        let len = mem::size_of_val(self);
        // SAFETY:
        // - `self` is guaranteed by the type system to be valid for writes of
        //   size `size_of_val(self)`.
        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
        //   as required by `u8`.
        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
        //   of `Self`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
    }
3239
3240 /// Creates an instance of `Self` from zeroed bytes.
3241 ///
3242 /// # Examples
3243 ///
3244 /// ```
3245 /// # use zerocopy::FromZeros;
3246 /// # use zerocopy_derive::*;
3247 /// #
3248 /// #[derive(FromZeros)]
3249 /// #[repr(C)]
3250 /// struct PacketHeader {
3251 /// src_port: [u8; 2],
3252 /// dst_port: [u8; 2],
3253 /// length: [u8; 2],
3254 /// checksum: [u8; 2],
3255 /// }
3256 ///
3257 /// let header: PacketHeader = FromZeros::new_zeroed();
3258 ///
3259 /// assert_eq!(header.src_port, [0, 0]);
3260 /// assert_eq!(header.dst_port, [0, 0]);
3261 /// assert_eq!(header.length, [0, 0]);
3262 /// assert_eq!(header.checksum, [0, 0]);
3263 /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn new_zeroed() -> Self
    where
        Self: Sized,
    {
        // SAFETY: `mem::zeroed` produces a value whose bytes are all zero, and
        // `FromZeros` says that the all-zeros bit pattern is legal for `Self`.
        unsafe { mem::zeroed() }
    }
3273
3274 /// Creates a `Box<Self>` from zeroed bytes.
3275 ///
3276 /// This function is useful for allocating large values on the heap and
3277 /// zero-initializing them, without ever creating a temporary instance of
3278 /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
3279 /// will allocate `[u8; 1048576]` directly on the heap; it does not require
3280 /// storing `[u8; 1048576]` in a temporary variable on the stack.
3281 ///
3282 /// On systems that use a heap implementation that supports allocating from
3283 /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
3284 /// have performance benefits.
3285 ///
3286 /// # Errors
3287 ///
3288 /// Returns an error on allocation failure. Allocation failure is guaranteed
3289 /// never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(any(feature = "alloc", test))]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
    where
        Self: Sized,
    {
        // If `T` is a ZST, then return a proper boxed instance of it. There is
        // no allocation, but `Box` does require a correct dangling pointer.
        let layout = Layout::new::<Self>();
        if layout.size() == 0 {
            // Construct the `Box` from a dangling pointer to avoid calling
            // `Self::new_zeroed`. This ensures that stack space is never
            // allocated for `Self` even on lower opt-levels where this branch
            // might not get optimized out.

            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
            // requirements are that the pointer is non-null and sufficiently
            // aligned. Per [2], `NonNull::dangling` produces a pointer which
            // is sufficiently aligned. Since the produced pointer is a
            // `NonNull`, it is non-null.
            //
            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
            //
            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
            //
            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
            //
            //   Creates a new `NonNull` that is dangling, but well-aligned.
            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
        }

        // SAFETY: `layout.size()` is non-zero (checked above), which is
        // `alloc_zeroed`'s only precondition.
        //
        // FIXME(#429): Now that a SAFETY comment exists, remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
        if ptr.is_null() {
            return Err(AllocError);
        }
        // SAFETY: `ptr` is non-null (checked above) and was allocated by the
        // global allocator with the layout of `Self`. Its referent is zeroed,
        // which is a valid `Self` because `Self: FromZeros`.
        //
        // FIXME(#429): Now that a SAFETY comment exists, remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr) })
    }
3333
    /// Creates a `Box<Self>` from zeroed bytes, with `count` trailing slice
    /// elements.
    ///
    /// `Self` must be a slice type (e.g., `[u8]`) or a slice DST, as required
    /// by the `KnownLayout<PointerMetadata = usize>` bound.
    ///
    /// This function is useful for allocating large values of `Self` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `Self` on the stack. For example,
    /// `<[u8]>::new_box_zeroed_with_elems(1048576)` will allocate the slice
    /// directly on the heap; it does not require storing the slice on the
    /// stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_zeroed_with_elems` may have
    /// performance benefits.
    ///
    /// If the trailing slice element of `Self` is a zero-sized type, then this
    /// function will return a `Box<Self>` with the correct `count`. Such a box
    /// cannot contain any actual information, but its element count will
    /// report the correct value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
        // (and, consequently, the `Box` derived from it) is a valid instance of
        // `Self`, because `Self` is `FromZeros`.
        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
    }
3369
    // Deprecated, hidden backwards-compatibility shim: forwards directly to
    // `new_box_zeroed_with_elems` on the slice type `[Self]`.
    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len)
    }
3382
3383 /// Creates a `Vec<Self>` from zeroed bytes.
3384 ///
3385 /// This function is useful for allocating large values of `Vec`s and
3386 /// zero-initializing them, without ever creating a temporary instance of
3387 /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3388 /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3389 /// heap; it does not require storing intermediate values on the stack.
3390 ///
3391 /// On systems that use a heap implementation that supports allocating from
3392 /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3393 ///
3394 /// If `Self` is a zero-sized type, then this function will return a
3395 /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3396 /// actual information, but its `len()` property will report the correct
3397 /// value.
3398 ///
3399 /// # Errors
3400 ///
3401 /// Returns an error on allocation failure. Allocation failure is
3402 /// guaranteed never to cause a panic or an abort.
3403 #[must_use = "has no side effects (other than allocation)"]
3404 #[cfg(feature = "alloc")]
3405 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3406 #[inline(always)]
3407 fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3408 where
3409 Self: Sized,
3410 {
3411 <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3412 }
3413
3414 /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3415 /// the vector. The new items are initialized with zeros.
3416 #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
3417 #[cfg(feature = "alloc")]
3418 #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3419 #[inline(always)]
3420 fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3421 where
3422 Self: Sized,
3423 {
3424 // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3425 // panic condition is not satisfied.
3426 <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3427 }
3428
    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    ///
    /// # Errors
    ///
    /// Returns an error if reserving capacity for the `additional` elements
    /// fails. On error, `v` is left unmodified.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        #[allow(clippy::multiple_unsafe_ops_per_block)]
        unsafe {
            // This is a potentially overlapping copy: `copy_to` has `memmove`
            // semantics, so it is sound even though the source and destination
            // ranges may overlap. It shifts the `v.len() - position` trailing
            // elements `additional` slots to the right to make room for the
            // new items.
            let ptr = v.as_mut_ptr();
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            // Zero-fill the gap; all-zeros bytes are a valid `Self` because
            // `Self: FromZeros`.
            ptr.add(position).write_bytes(0, additional);
            // Every element in `0..v.len() + additional` is now initialized,
            // so the length update is sound.
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
3471}
3472
3473/// Analyzes whether a type is [`FromBytes`].
3474///
3475/// This derive analyzes, at compile time, whether the annotated type satisfies
3476/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3477/// supertraits if it is sound to do so. This derive can be applied to structs,
3478/// enums, and unions;
3479/// e.g.:
3480///
3481/// ```
3482/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3483/// #[derive(FromBytes)]
3484/// struct MyStruct {
3485/// # /*
3486/// ...
3487/// # */
3488/// }
3489///
3490/// #[derive(FromBytes)]
3491/// #[repr(u8)]
3492/// enum MyEnum {
3493/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3494/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3495/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3496/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3497/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3498/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3499/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3500/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3501/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3502/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3503/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3504/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3505/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3506/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3507/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3508/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3509/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3510/// # VFF,
3511/// # /*
3512/// ...
3513/// # */
3514/// }
3515///
3516/// #[derive(FromBytes, Immutable)]
3517/// union MyUnion {
3518/// # variant: u8,
3519/// # /*
3520/// ...
3521/// # */
3522/// }
3523/// ```
3524///
3525/// [safety conditions]: trait@FromBytes#safety
3526///
3527/// # Analysis
3528///
3529/// *This section describes, roughly, the analysis performed by this derive to
3530/// determine whether it is sound to implement `FromBytes` for a given type.
3531/// Unless you are modifying the implementation of this derive, or attempting to
3532/// manually implement `FromBytes` for a type yourself, you don't need to read
3533/// this section.*
3534///
3535/// If a type has the following properties, then this derive can implement
3536/// `FromBytes` for that type:
3537///
3538/// - If the type is a struct, all of its fields must be `FromBytes`.
3539/// - If the type is an enum:
3540/// - It must have a defined representation which is one of `u8`, `u16`, `i8`,
3541/// or `i16`.
3542/// - The maximum number of discriminants must be used (so that every possible
3543/// bit pattern is a valid one).
3544/// - Its fields must be `FromBytes`.
3545///
3546/// This analysis is subject to change. Unsafe code may *only* rely on the
3547/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3548/// implementation details of this derive.
3549///
3550/// ## Why isn't an explicit representation required for structs?
3551///
3552/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3553/// that structs are marked with `#[repr(C)]`.
3554///
/// Per the [Rust reference][reference],
3556///
3557/// > The representation of a type can change the padding between fields, but
3558/// > does not change the layout of the fields themselves.
3559///
3560/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3561///
3562/// Since the layout of structs only consists of padding bytes and field bytes,
3563/// a struct is soundly `FromBytes` if:
3564/// 1. its padding is soundly `FromBytes`, and
3565/// 2. its fields are soundly `FromBytes`.
3566///
3567/// The answer to the first question is always yes: padding bytes do not have
3568/// any validity constraints. A [discussion] of this question in the Unsafe Code
3569/// Guidelines Working Group concluded that it would be virtually unimaginable
3570/// for future versions of rustc to add validity constraints to padding bytes.
3571///
3572/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3573///
3574/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3575/// its fields are `FromBytes`.
3576#[cfg(any(feature = "derive", test))]
3577#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3578pub use zerocopy_derive::FromBytes;
3579
3580/// Types for which any bit pattern is valid.
3581///
3582/// Any memory region of the appropriate length which contains initialized bytes
3583/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3584/// useful for efficiently parsing bytes as structured data.
3585///
3586/// # Warning: Padding bytes
3587///
3588/// Note that, when a value is moved or copied, only the non-padding bytes of
3589/// that value are guaranteed to be preserved. It is unsound to assume that
3590/// values written to padding bytes are preserved after a move or copy. For
3591/// example, the following is unsound:
3592///
3593/// ```rust,no_run
3594/// use core::mem::{size_of, transmute};
3595/// use zerocopy::FromZeros;
3596/// # use zerocopy_derive::*;
3597///
3598/// // Assume `Foo` is a type with padding bytes.
3599/// #[derive(FromZeros, Default)]
3600/// struct Foo {
3601/// # /*
3602/// ...
3603/// # */
3604/// }
3605///
3606/// let mut foo: Foo = Foo::default();
3607/// FromZeros::zero(&mut foo);
3608/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3609/// // those writes are not guaranteed to be preserved in padding bytes when
3610/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3611/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3612/// ```
3613///
3614/// # Implementation
3615///
3616/// **Do not implement this trait yourself!** Instead, use
3617/// [`#[derive(FromBytes)]`][derive]; e.g.:
3618///
3619/// ```
3620/// # use zerocopy_derive::{FromBytes, Immutable};
3621/// #[derive(FromBytes)]
3622/// struct MyStruct {
3623/// # /*
3624/// ...
3625/// # */
3626/// }
3627///
3628/// #[derive(FromBytes)]
3629/// #[repr(u8)]
3630/// enum MyEnum {
3631/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3632/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3633/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3634/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3635/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3636/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3637/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3638/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3639/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3640/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3641/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3642/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3643/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3644/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3645/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3646/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3647/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3648/// # VFF,
3649/// # /*
3650/// ...
3651/// # */
3652/// }
3653///
3654/// #[derive(FromBytes, Immutable)]
3655/// union MyUnion {
3656/// # variant: u8,
3657/// # /*
3658/// ...
3659/// # */
3660/// }
3661/// ```
3662///
3663/// This derive performs a sophisticated, compile-time safety analysis to
3664/// determine whether a type is `FromBytes`.
3665///
3666/// # Safety
3667///
3668/// *This section describes what is required in order for `T: FromBytes`, and
3669/// what unsafe code may assume of such types. If you don't plan on implementing
3670/// `FromBytes` manually, and you don't plan on writing unsafe code that
3671/// operates on `FromBytes` types, then you don't need to read this section.*
3672///
3673/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3674/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3675/// words, any byte value which is not uninitialized). If a type is marked as
3676/// `FromBytes` which violates this contract, it may cause undefined behavior.
3677///
3678/// `#[derive(FromBytes)]` only permits [types which satisfy these
3679/// requirements][derive-analysis].
3680///
3681#[cfg_attr(
3682 feature = "derive",
3683 doc = "[derive]: zerocopy_derive::FromBytes",
3684 doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3685)]
3686#[cfg_attr(
3687 not(feature = "derive"),
3688 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3689 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3690)]
3691#[cfg_attr(
3692 not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3693 diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3694)]
3695pub unsafe trait FromBytes: FromZeros {
    // Requiring this hidden item discourages manual implementations of this
    // `unsafe` trait, steering users toward `#[derive(FromBytes)]` instead.
    // The `Self: Sized` bound makes it so that `FromBytes` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3702
3703 /// Interprets the given `source` as a `&Self`.
3704 ///
3705 /// This method attempts to return a reference to `source` interpreted as a
3706 /// `Self`. If the length of `source` is not a [valid size of
3707 /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3708 /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3709 /// [infallibly discard the alignment error][size-error-from].
3710 ///
3711 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3712 ///
3713 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3714 /// [self-unaligned]: Unaligned
3715 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3716 /// [slice-dst]: KnownLayout#dynamically-sized-types
3717 ///
3718 /// # Compile-Time Assertions
3719 ///
3720 /// This method cannot yet be used on unsized types whose dynamically-sized
3721 /// component is zero-sized. Attempting to use this method on such types
3722 /// results in a compile-time assertion error; e.g.:
3723 ///
3724 /// ```compile_fail,E0080
3725 /// use zerocopy::*;
3726 /// # use zerocopy_derive::*;
3727 ///
3728 /// #[derive(FromBytes, Immutable, KnownLayout)]
3729 /// #[repr(C)]
3730 /// struct ZSTy {
3731 /// leading_sized: u16,
3732 /// trailing_dst: [()],
3733 /// }
3734 ///
    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
3736 /// ```
3737 ///
3738 /// # Examples
3739 ///
3740 /// ```
3741 /// use zerocopy::FromBytes;
3742 /// # use zerocopy_derive::*;
3743 ///
3744 /// #[derive(FromBytes, KnownLayout, Immutable)]
3745 /// #[repr(C)]
3746 /// struct PacketHeader {
3747 /// src_port: [u8; 2],
3748 /// dst_port: [u8; 2],
3749 /// length: [u8; 2],
3750 /// checksum: [u8; 2],
3751 /// }
3752 ///
3753 /// #[derive(FromBytes, KnownLayout, Immutable)]
3754 /// #[repr(C)]
3755 /// struct Packet {
3756 /// header: PacketHeader,
3757 /// body: [u8],
3758 /// }
3759 ///
3760 /// // These bytes encode a `Packet`.
3761 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3762 ///
3763 /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3764 ///
3765 /// assert_eq!(packet.header.src_port, [0, 1]);
3766 /// assert_eq!(packet.header.dst_port, [2, 3]);
3767 /// assert_eq!(packet.header.length, [4, 5]);
3768 /// assert_eq!(packet.header.checksum, [6, 7]);
3769 /// assert_eq!(packet.body, [8, 9, 10, 11]);
3770 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Reject, at compile time, DSTs whose dynamically-sized component is
        // zero-sized (see "Compile-Time Assertions" above).
        static_assert_dst_is_not_zst!(Self);
        // Attempt a size- and alignment-checked cast of the entire byte slice;
        // no leftover bytes are permitted. `None` is passed because no
        // explicit element count is supplied.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
            // On success, re-brand the pointer's validity from "initialized
            // bytes" to "valid `Self`" (sound because `Self: FromBytes`) and
            // convert it to a reference.
            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
            // On failure, map the error's source pointer back to `&[u8]`.
            Err(err) => Err(err.map_src(|src| src.as_ref())),
        }
    }
3783
3784 /// Interprets the prefix of the given `source` as a `&Self` without
3785 /// copying.
3786 ///
3787 /// This method computes the [largest possible size of `Self`][valid-size]
3788 /// that can fit in the leading bytes of `source`, then attempts to return
3789 /// both a reference to those bytes interpreted as a `Self`, and a reference
3790 /// to the remaining bytes. If there are insufficient bytes, or if `source`
3791 /// is not appropriately aligned, this returns `Err`. If [`Self:
3792 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3793 /// error][size-error-from].
3794 ///
3795 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3796 ///
3797 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3798 /// [self-unaligned]: Unaligned
3799 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3800 /// [slice-dst]: KnownLayout#dynamically-sized-types
3801 ///
3802 /// # Compile-Time Assertions
3803 ///
3804 /// This method cannot yet be used on unsized types whose dynamically-sized
3805 /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
3806 /// support such types. Attempting to use this method on such types results
3807 /// in a compile-time assertion error; e.g.:
3808 ///
3809 /// ```compile_fail,E0080
3810 /// use zerocopy::*;
3811 /// # use zerocopy_derive::*;
3812 ///
3813 /// #[derive(FromBytes, Immutable, KnownLayout)]
3814 /// #[repr(C)]
3815 /// struct ZSTy {
3816 /// leading_sized: u16,
3817 /// trailing_dst: [()],
3818 /// }
3819 ///
    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
3821 /// ```
3822 ///
3823 /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
3824 ///
3825 /// # Examples
3826 ///
3827 /// ```
3828 /// use zerocopy::FromBytes;
3829 /// # use zerocopy_derive::*;
3830 ///
3831 /// #[derive(FromBytes, KnownLayout, Immutable)]
3832 /// #[repr(C)]
3833 /// struct PacketHeader {
3834 /// src_port: [u8; 2],
3835 /// dst_port: [u8; 2],
3836 /// length: [u8; 2],
3837 /// checksum: [u8; 2],
3838 /// }
3839 ///
3840 /// #[derive(FromBytes, KnownLayout, Immutable)]
3841 /// #[repr(C)]
3842 /// struct Packet {
3843 /// header: PacketHeader,
3844 /// body: [[u8; 2]],
3845 /// }
3846 ///
3847 /// // These are more bytes than are needed to encode a `Packet`.
3848 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
3849 ///
3850 /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
3851 ///
3852 /// assert_eq!(packet.header.src_port, [0, 1]);
3853 /// assert_eq!(packet.header.dst_port, [2, 3]);
3854 /// assert_eq!(packet.header.length, [4, 5]);
3855 /// assert_eq!(packet.header.checksum, [6, 7]);
3856 /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
3857 /// assert_eq!(suffix, &[14u8][..]);
3858 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Reject, at compile time, DSTs whose dynamically-sized component is
        // zero-sized (see "Compile-Time Assertions" above).
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared helper; `CastType::Prefix` casts the leading
        // bytes to `Self` and returns the remaining suffix bytes alongside.
        ref_from_prefix_suffix(source, None, CastType::Prefix)
    }
3868
3869 /// Interprets the suffix of the given bytes as a `&Self`.
3870 ///
3871 /// This method computes the [largest possible size of `Self`][valid-size]
3872 /// that can fit in the trailing bytes of `source`, then attempts to return
3873 /// both a reference to those bytes interpreted as a `Self`, and a reference
3874 /// to the preceding bytes. If there are insufficient bytes, or if that
3875 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3876 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3877 /// alignment error][size-error-from].
3878 ///
3879 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3880 ///
3881 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3882 /// [self-unaligned]: Unaligned
3883 /// [size-error-from]: error/struct.SizeError.html#method.from-1
3884 /// [slice-dst]: KnownLayout#dynamically-sized-types
3885 ///
3886 /// # Compile-Time Assertions
3887 ///
3888 /// This method cannot yet be used on unsized types whose dynamically-sized
3889 /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3890 /// support such types. Attempting to use this method on such types results
3891 /// in a compile-time assertion error; e.g.:
3892 ///
3893 /// ```compile_fail,E0080
3894 /// use zerocopy::*;
3895 /// # use zerocopy_derive::*;
3896 ///
3897 /// #[derive(FromBytes, Immutable, KnownLayout)]
3898 /// #[repr(C)]
3899 /// struct ZSTy {
3900 /// leading_sized: u16,
3901 /// trailing_dst: [()],
3902 /// }
3903 ///
    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
3905 /// ```
3906 ///
3907 /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3908 ///
3909 /// # Examples
3910 ///
3911 /// ```
3912 /// use zerocopy::FromBytes;
3913 /// # use zerocopy_derive::*;
3914 ///
3915 /// #[derive(FromBytes, Immutable, KnownLayout)]
3916 /// #[repr(C)]
3917 /// struct PacketTrailer {
3918 /// frame_check_sequence: [u8; 4],
3919 /// }
3920 ///
3921 /// // These are more bytes than are needed to encode a `PacketTrailer`.
3922 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3923 ///
3924 /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3925 ///
3926 /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3927 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3928 /// ```
3929 #[must_use = "has no side effects"]
3930 #[inline]
3931 fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3932 where
3933 Self: Immutable + KnownLayout,
3934 {
3935 static_assert_dst_is_not_zst!(Self);
3936 ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3937 }
3938
    /// Interprets the given `source` as a `&mut Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_bytes_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_bytes_with_elems`]: FromBytes::mut_from_bytes_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    ///
    /// header.checksum = [0, 0];
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Reject, at compile time, DSTs whose dynamically-sized component is
        // zero-sized (see "Compile-Time Assertions" above).
        static_assert_dst_is_not_zst!(Self);
        // Attempt a size- and alignment-checked cast of the entire byte slice;
        // no leftover bytes are permitted.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
            // On success, re-brand the pointer's validity from "initialized
            // bytes" to "valid `Self`" (sound because `Self: FromBytes`), then
            // convert it to an exclusive reference.
            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
            // On failure, map the error's source pointer back to `&mut [u8]`.
            Err(err) => Err(err.map_src(|src| src.as_mut())),
        }
    }
4019
    /// Interprets the prefix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, &[8, 9][..]);
    ///
    /// header.checksum = [0, 0];
    /// body.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Reject, at compile time, DSTs whose dynamically-sized component is
        // zero-sized (see "Compile-Time Assertions" above).
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared helper; `CastType::Prefix` casts the leading
        // bytes to `Self` and returns the remaining suffix bytes alongside.
        mut_from_prefix_suffix(source, None, CastType::Prefix)
    }
4104
4105 /// Interprets the suffix of the given `source` as a `&mut Self` without
4106 /// copying.
4107 ///
4108 /// This method computes the [largest possible size of `Self`][valid-size]
4109 /// that can fit in the trailing bytes of `source`, then attempts to return
4110 /// both a reference to those bytes interpreted as a `Self`, and a reference
4111 /// to the preceding bytes. If there are insufficient bytes, or if that
4112 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4113 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4114 /// alignment error][size-error-from].
4115 ///
4116 /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4117 ///
4118 /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4119 /// [self-unaligned]: Unaligned
4120 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4121 /// [slice-dst]: KnownLayout#dynamically-sized-types
4122 ///
4123 /// # Compile-Time Assertions
4124 ///
4125 /// This method cannot yet be used on unsized types whose dynamically-sized
4126 /// component is zero-sized. Attempting to use this method on such types
4127 /// results in a compile-time assertion error; e.g.:
4128 ///
4129 /// ```compile_fail,E0080
4130 /// use zerocopy::*;
4131 /// # use zerocopy_derive::*;
4132 ///
4133 /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4134 /// #[repr(C, packed)]
4135 /// struct ZSTy {
4136 /// leading_sized: [u8; 2],
4137 /// trailing_dst: [()],
4138 /// }
4139 ///
4140 /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
4142 /// ```
4143 ///
4144 /// # Examples
4145 ///
4146 /// ```
4147 /// use zerocopy::FromBytes;
4148 /// # use zerocopy_derive::*;
4149 ///
4150 /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4151 /// #[repr(C)]
4152 /// struct PacketTrailer {
4153 /// frame_check_sequence: [u8; 4],
4154 /// }
4155 ///
4156 /// // These are more bytes than are needed to encode a `PacketTrailer`.
4157 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4158 ///
4159 /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
4160 ///
4161 /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
4162 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4163 ///
4164 /// prefix.fill(0);
4165 /// trailer.frame_check_sequence.fill(1);
4166 ///
4167 /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
4168 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Compile-time check: reject types whose dynamically-sized component
        // is zero-sized (see the "Compile-Time Assertions" doc section above).
        static_assert_dst_is_not_zst!(Self);
        // `None` metadata asks the helper for the largest `Self` that fits in
        // the suffix; `swap` reorders the returned pair so the untouched
        // leading bytes come first, matching this method's return type.
        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4180
4181 /// Interprets the given `source` as a `&Self` with a DST length equal to
4182 /// `count`.
4183 ///
4184 /// This method attempts to return a reference to `source` interpreted as a
4185 /// `Self` with `count` trailing elements. If the length of `source` is not
4186 /// equal to the size of `Self` with `count` elements, or if `source` is not
4187 /// appropriately aligned, this returns `Err`. If [`Self:
4188 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4189 /// error][size-error-from].
4190 ///
4191 /// [self-unaligned]: Unaligned
4192 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4193 ///
4194 /// # Examples
4195 ///
4196 /// ```
4197 /// use zerocopy::FromBytes;
4198 /// # use zerocopy_derive::*;
4199 ///
4200 /// # #[derive(Debug, PartialEq, Eq)]
4201 /// #[derive(FromBytes, Immutable)]
4202 /// #[repr(C)]
4203 /// struct Pixel {
4204 /// r: u8,
4205 /// g: u8,
4206 /// b: u8,
4207 /// a: u8,
4208 /// }
4209 ///
4210 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4211 ///
4212 /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
4213 ///
4214 /// assert_eq!(pixels, &[
4215 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4216 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4217 /// ]);
4218 ///
4219 /// ```
4220 ///
4221 /// Since an explicit `count` is provided, this method supports types with
4222 /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
4223 /// which do not take an explicit count do not support such types.
4224 ///
4225 /// ```
4226 /// use zerocopy::*;
4227 /// # use zerocopy_derive::*;
4228 ///
4229 /// #[derive(FromBytes, Immutable, KnownLayout)]
4230 /// #[repr(C)]
4231 /// struct ZSTy {
4232 /// leading_sized: [u8; 2],
4233 /// trailing_dst: [()],
4234 /// }
4235 ///
4236 /// let src = &[85, 85][..];
4237 /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
4238 /// assert_eq!(zsty.trailing_dst.len(), 42);
4239 /// ```
4240 ///
4241 /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Wrap the byte slice in a `Ptr`, which tracks aliasing and validity
        // invariants through the cast.
        let source = Ptr::from_ref(source);
        // Cast the *entire* slice (no leftover bytes permitted) to a `Self`
        // with exactly `count` trailing elements; fails on a size or
        // alignment mismatch.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // `recall_validity` re-derives the validity invariant needed to
            // reborrow the pointer as an ordinary `&Self`.
            Ok(slf) => Ok(slf.recall_validity().as_ref()),
            // On failure, map the error's stored source pointer back to the
            // plain `&[u8]` the caller passed in.
            Err(err) => Err(err.map_src(|s| s.as_ref())),
        }
    }
4258
4259 /// Interprets the prefix of the given `source` as a DST `&Self` with length
4260 /// equal to `count`.
4261 ///
4262 /// This method attempts to return a reference to the prefix of `source`
4263 /// interpreted as a `Self` with `count` trailing elements, and a reference
4264 /// to the remaining bytes. If there are insufficient bytes, or if `source`
4265 /// is not appropriately aligned, this returns `Err`. If [`Self:
4266 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4267 /// error][size-error-from].
4268 ///
4269 /// [self-unaligned]: Unaligned
4270 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4271 ///
4272 /// # Examples
4273 ///
4274 /// ```
4275 /// use zerocopy::FromBytes;
4276 /// # use zerocopy_derive::*;
4277 ///
4278 /// # #[derive(Debug, PartialEq, Eq)]
4279 /// #[derive(FromBytes, Immutable)]
4280 /// #[repr(C)]
4281 /// struct Pixel {
4282 /// r: u8,
4283 /// g: u8,
4284 /// b: u8,
4285 /// a: u8,
4286 /// }
4287 ///
4288 /// // These are more bytes than are needed to encode two `Pixel`s.
4289 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4290 ///
4291 /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
4292 ///
4293 /// assert_eq!(pixels, &[
4294 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4295 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4296 /// ]);
4297 ///
4298 /// assert_eq!(suffix, &[8, 9]);
4299 /// ```
4300 ///
4301 /// Since an explicit `count` is provided, this method supports types with
4302 /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
4303 /// which do not take an explicit count do not support such types.
4304 ///
4305 /// ```
4306 /// use zerocopy::*;
4307 /// # use zerocopy_derive::*;
4308 ///
4309 /// #[derive(FromBytes, Immutable, KnownLayout)]
4310 /// #[repr(C)]
4311 /// struct ZSTy {
4312 /// leading_sized: [u8; 2],
4313 /// trailing_dst: [()],
4314 /// }
4315 ///
4316 /// let src = &[85, 85][..];
4317 /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
4318 /// assert_eq!(zsty.trailing_dst.len(), 42);
4319 /// ```
4320 ///
4321 /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
4322 #[must_use = "has no side effects"]
4323 #[inline]
4324 fn ref_from_prefix_with_elems(
4325 source: &[u8],
4326 count: usize,
4327 ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
4328 where
4329 Self: KnownLayout<PointerMetadata = usize> + Immutable,
4330 {
4331 ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
4332 }
4333
4334 /// Interprets the suffix of the given `source` as a DST `&Self` with length
4335 /// equal to `count`.
4336 ///
4337 /// This method attempts to return a reference to the suffix of `source`
4338 /// interpreted as a `Self` with `count` trailing elements, and a reference
4339 /// to the preceding bytes. If there are insufficient bytes, or if that
4340 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4341 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4342 /// alignment error][size-error-from].
4343 ///
4344 /// [self-unaligned]: Unaligned
4345 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4346 ///
4347 /// # Examples
4348 ///
4349 /// ```
4350 /// use zerocopy::FromBytes;
4351 /// # use zerocopy_derive::*;
4352 ///
4353 /// # #[derive(Debug, PartialEq, Eq)]
4354 /// #[derive(FromBytes, Immutable)]
4355 /// #[repr(C)]
4356 /// struct Pixel {
4357 /// r: u8,
4358 /// g: u8,
4359 /// b: u8,
4360 /// a: u8,
4361 /// }
4362 ///
4363 /// // These are more bytes than are needed to encode two `Pixel`s.
4364 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4365 ///
4366 /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4367 ///
4368 /// assert_eq!(prefix, &[0, 1]);
4369 ///
4370 /// assert_eq!(pixels, &[
4371 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
4372 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
4373 /// ]);
4374 /// ```
4375 ///
4376 /// Since an explicit `count` is provided, this method supports types with
4377 /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4378 /// which do not take an explicit count do not support such types.
4379 ///
4380 /// ```
4381 /// use zerocopy::*;
4382 /// # use zerocopy_derive::*;
4383 ///
4384 /// #[derive(FromBytes, Immutable, KnownLayout)]
4385 /// #[repr(C)]
4386 /// struct ZSTy {
4387 /// leading_sized: [u8; 2],
4388 /// trailing_dst: [()],
4389 /// }
4390 ///
4391 /// let src = &[85, 85][..];
4392 /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4393 /// assert_eq!(zsty.trailing_dst.len(), 42);
4394 /// ```
4395 ///
4396 /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4397 #[must_use = "has no side effects"]
4398 #[inline]
4399 fn ref_from_suffix_with_elems(
4400 source: &[u8],
4401 count: usize,
4402 ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4403 where
4404 Self: KnownLayout<PointerMetadata = usize> + Immutable,
4405 {
4406 ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4407 }
4408
4409 /// Interprets the given `source` as a `&mut Self` with a DST length equal
4410 /// to `count`.
4411 ///
4412 /// This method attempts to return a reference to `source` interpreted as a
4413 /// `Self` with `count` trailing elements. If the length of `source` is not
4414 /// equal to the size of `Self` with `count` elements, or if `source` is not
4415 /// appropriately aligned, this returns `Err`. If [`Self:
4416 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4417 /// error][size-error-from].
4418 ///
4419 /// [self-unaligned]: Unaligned
4420 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4421 ///
4422 /// # Examples
4423 ///
4424 /// ```
4425 /// use zerocopy::FromBytes;
4426 /// # use zerocopy_derive::*;
4427 ///
4428 /// # #[derive(Debug, PartialEq, Eq)]
4429 /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4430 /// #[repr(C)]
4431 /// struct Pixel {
4432 /// r: u8,
4433 /// g: u8,
4434 /// b: u8,
4435 /// a: u8,
4436 /// }
4437 ///
4438 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4439 ///
4440 /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
4441 ///
4442 /// assert_eq!(pixels, &[
4443 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4444 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4445 /// ]);
4446 ///
4447 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4448 ///
4449 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
4450 /// ```
4451 ///
4452 /// Since an explicit `count` is provided, this method supports types with
4453 /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
4454 /// which do not take an explicit count do not support such types.
4455 ///
4456 /// ```
4457 /// use zerocopy::*;
4458 /// # use zerocopy_derive::*;
4459 ///
4460 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4461 /// #[repr(C, packed)]
4462 /// struct ZSTy {
4463 /// leading_sized: [u8; 2],
4464 /// trailing_dst: [()],
4465 /// }
4466 ///
4467 /// let src = &mut [85, 85][..];
4468 /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
4469 /// assert_eq!(zsty.trailing_dst.len(), 42);
4470 /// ```
4471 ///
4472 /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Wrap the bytes in a `Ptr` constructed from a `&mut`, giving it
        // exclusive aliasing so the result can be reborrowed mutably.
        let source = Ptr::from_mut(source);
        // Cast the *entire* slice (no leftover bytes permitted) to a `Self`
        // with exactly `count` trailing elements; fails on a size or
        // alignment mismatch.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // The turbofish names the proof path `recall_validity` uses (via
            // `BecauseExclusive`) to re-derive the validity invariant needed
            // to reborrow as `&mut Self`.
            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
            // On failure, map the error's stored source pointer back to the
            // plain `&mut [u8]` the caller passed in.
            Err(err) => Err(err.map_src(|s| s.as_mut())),
        }
    }
4489
4490 /// Interprets the prefix of the given `source` as a `&mut Self` with DST
4491 /// length equal to `count`.
4492 ///
4493 /// This method attempts to return a reference to the prefix of `source`
4494 /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4496 /// is not appropriately aligned, this returns `Err`. If [`Self:
4497 /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4498 /// error][size-error-from].
4499 ///
4500 /// [self-unaligned]: Unaligned
4501 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4502 ///
4503 /// # Examples
4504 ///
4505 /// ```
4506 /// use zerocopy::FromBytes;
4507 /// # use zerocopy_derive::*;
4508 ///
4509 /// # #[derive(Debug, PartialEq, Eq)]
4510 /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4511 /// #[repr(C)]
4512 /// struct Pixel {
4513 /// r: u8,
4514 /// g: u8,
4515 /// b: u8,
4516 /// a: u8,
4517 /// }
4518 ///
4519 /// // These are more bytes than are needed to encode two `Pixel`s.
4520 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4521 ///
4522 /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
4523 ///
4524 /// assert_eq!(pixels, &[
4525 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
4526 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
4527 /// ]);
4528 ///
4529 /// assert_eq!(suffix, &[8, 9]);
4530 ///
4531 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4532 /// suffix.fill(1);
4533 ///
4534 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
4535 /// ```
4536 ///
4537 /// Since an explicit `count` is provided, this method supports types with
4538 /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
4539 /// which do not take an explicit count do not support such types.
4540 ///
4541 /// ```
4542 /// use zerocopy::*;
4543 /// # use zerocopy_derive::*;
4544 ///
4545 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4546 /// #[repr(C, packed)]
4547 /// struct ZSTy {
4548 /// leading_sized: [u8; 2],
4549 /// trailing_dst: [()],
4550 /// }
4551 ///
4552 /// let src = &mut [85, 85][..];
4553 /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
4554 /// assert_eq!(zsty.trailing_dst.len(), 42);
4555 /// ```
4556 ///
4557 /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
4558 #[must_use = "has no side effects"]
4559 #[inline]
4560 fn mut_from_prefix_with_elems(
4561 source: &mut [u8],
4562 count: usize,
4563 ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4564 where
4565 Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4566 {
4567 mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
4568 }
4569
4570 /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4571 /// length equal to `count`.
4572 ///
4573 /// This method attempts to return a reference to the suffix of `source`
4574 /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
4576 /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4577 /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4578 /// alignment error][size-error-from].
4579 ///
4580 /// [self-unaligned]: Unaligned
4581 /// [size-error-from]: error/struct.SizeError.html#method.from-1
4582 ///
4583 /// # Examples
4584 ///
4585 /// ```
4586 /// use zerocopy::FromBytes;
4587 /// # use zerocopy_derive::*;
4588 ///
4589 /// # #[derive(Debug, PartialEq, Eq)]
4590 /// #[derive(FromBytes, IntoBytes, Immutable)]
4591 /// #[repr(C)]
4592 /// struct Pixel {
4593 /// r: u8,
4594 /// g: u8,
4595 /// b: u8,
4596 /// a: u8,
4597 /// }
4598 ///
4599 /// // These are more bytes than are needed to encode two `Pixel`s.
4600 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4601 ///
4602 /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4603 ///
4604 /// assert_eq!(prefix, &[0, 1]);
4605 ///
4606 /// assert_eq!(pixels, &[
4607 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
4608 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
4609 /// ]);
4610 ///
4611 /// prefix.fill(9);
4612 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4613 ///
4614 /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4615 /// ```
4616 ///
4617 /// Since an explicit `count` is provided, this method supports types with
4618 /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4619 /// which do not take an explicit count do not support such types.
4620 ///
4621 /// ```
4622 /// use zerocopy::*;
4623 /// # use zerocopy_derive::*;
4624 ///
4625 /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4626 /// #[repr(C, packed)]
4627 /// struct ZSTy {
4628 /// leading_sized: [u8; 2],
4629 /// trailing_dst: [()],
4630 /// }
4631 ///
4632 /// let src = &mut [85, 85][..];
4633 /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4634 /// assert_eq!(zsty.trailing_dst.len(), 42);
4635 /// ```
4636 ///
4637 /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4638 #[must_use = "has no side effects"]
4639 #[inline]
4640 fn mut_from_suffix_with_elems(
4641 source: &mut [u8],
4642 count: usize,
4643 ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4644 where
4645 Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4646 {
4647 mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4648 }
4649
4650 /// Reads a copy of `Self` from the given `source`.
4651 ///
4652 /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
4653 ///
4654 /// # Examples
4655 ///
4656 /// ```
4657 /// use zerocopy::FromBytes;
4658 /// # use zerocopy_derive::*;
4659 ///
4660 /// #[derive(FromBytes)]
4661 /// #[repr(C)]
4662 /// struct PacketHeader {
4663 /// src_port: [u8; 2],
4664 /// dst_port: [u8; 2],
4665 /// length: [u8; 2],
4666 /// checksum: [u8; 2],
4667 /// }
4668 ///
4669 /// // These bytes encode a `PacketHeader`.
4670 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4671 ///
4672 /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
4673 ///
4674 /// assert_eq!(header.src_port, [0, 1]);
4675 /// assert_eq!(header.dst_port, [2, 3]);
4676 /// assert_eq!(header.length, [4, 5]);
4677 /// assert_eq!(header.checksum, [6, 7]);
4678 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Borrow `source` as an `Unalign<Self>` (alignment 1), then copy the
        // value out with `Ref::read`. Using `Unalign` means the only possible
        // failure is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from(source) {
            Ok(r) => Ok(Ref::read(&r).into_inner()),
            // Re-parameterize the size error's destination type from
            // `Unalign<Self>` to `Self` to match this method's signature.
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited, so this empty match is
            // vacuously exhaustive.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4697
4698 /// Reads a copy of `Self` from the prefix of the given `source`.
4699 ///
4700 /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
4701 /// of `source`, returning that `Self` and any remaining bytes. If
4702 /// `source.len() < size_of::<Self>()`, it returns `Err`.
4703 ///
4704 /// # Examples
4705 ///
4706 /// ```
4707 /// use zerocopy::FromBytes;
4708 /// # use zerocopy_derive::*;
4709 ///
4710 /// #[derive(FromBytes)]
4711 /// #[repr(C)]
4712 /// struct PacketHeader {
4713 /// src_port: [u8; 2],
4714 /// dst_port: [u8; 2],
4715 /// length: [u8; 2],
4716 /// checksum: [u8; 2],
4717 /// }
4718 ///
4719 /// // These are more bytes than are needed to encode a `PacketHeader`.
4720 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4721 ///
4722 /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
4723 ///
4724 /// assert_eq!(header.src_port, [0, 1]);
4725 /// assert_eq!(header.dst_port, [2, 3]);
4726 /// assert_eq!(header.length, [4, 5]);
4727 /// assert_eq!(header.checksum, [6, 7]);
4728 /// assert_eq!(body, [8, 9]);
4729 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Borrow the leading bytes as an `Unalign<Self>` (alignment 1), then
        // copy the value out with `Ref::read`. Using `Unalign` means the only
        // possible failure is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
            // Re-parameterize the size error's destination type from
            // `Unalign<Self>` to `Self` to match this method's signature.
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited, so this empty match is
            // vacuously exhaustive.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4748
4749 /// Reads a copy of `Self` from the suffix of the given `source`.
4750 ///
4751 /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
4752 /// of `source`, returning that `Self` and any preceding bytes. If
4753 /// `source.len() < size_of::<Self>()`, it returns `Err`.
4754 ///
4755 /// # Examples
4756 ///
4757 /// ```
4758 /// use zerocopy::FromBytes;
4759 /// # use zerocopy_derive::*;
4760 ///
4761 /// #[derive(FromBytes)]
4762 /// #[repr(C)]
4763 /// struct PacketTrailer {
4764 /// frame_check_sequence: [u8; 4],
4765 /// }
4766 ///
4767 /// // These are more bytes than are needed to encode a `PacketTrailer`.
4768 /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4769 ///
4770 /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
4771 ///
4772 /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
4773 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4774 /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Borrow the trailing bytes as an `Unalign<Self>` (alignment 1), then
        // copy the value out with `Ref::read`. Using `Unalign` means the only
        // possible failure is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
            // Re-parameterize the size error's destination type from
            // `Unalign<Self>` to `Self` to match this method's signature.
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited, so this empty match is
            // vacuously exhaustive.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4793
4794 /// Reads a copy of `self` from an `io::Read`.
4795 ///
4796 /// This is useful for interfacing with operating system byte sinks (files,
4797 /// sockets, etc.).
4798 ///
4799 /// # Examples
4800 ///
4801 /// ```no_run
4802 /// use zerocopy::{byteorder::big_endian::*, FromBytes};
4803 /// use std::fs::File;
4804 /// # use zerocopy_derive::*;
4805 ///
4806 /// #[derive(FromBytes)]
4807 /// #[repr(C)]
4808 /// struct BitmapFileHeader {
4809 /// signature: [u8; 2],
4810 /// size: U32,
4811 /// reserved: U64,
4812 /// offset: U64,
4813 /// }
4814 ///
4815 /// let mut file = File::open("image.bin").unwrap();
4816 /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
4817 /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn read_from_io<R>(mut src: R) -> io::Result<Self>
    where
        Self: Sized,
        R: io::Read,
    {
        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
        // will not necessarily preserve zeros written to those padding byte
        // locations, and so `buf` could contain uninitialized bytes.
        let mut buf = CoreMaybeUninit::<Self>::uninit();
        buf.zero();

        let ptr = Ptr::from_mut(&mut buf);
        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
        // cannot be used to write values which will violate `buf`'s bit
        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
        // cannot be violated even though `buf` may have more permissive bit
        // validity than `ptr`.
        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
        // View the zeroed buffer as raw bytes and fill it from the reader;
        // `read_exact` errors (e.g. unexpected EOF) propagate to the caller.
        let ptr = ptr.as_bytes();
        src.read_exact(ptr.as_mut())?;
        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
        // `FromBytes`.
        Ok(unsafe { buf.assume_init() })
    }
4849
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn ref_from(source: &[u8]) -> Option<&Self>
    where
        Self: KnownLayout + Immutable,
    {
        // Deprecated shim: forwards to the renamed method, collapsing the
        // detailed `CastError` into an `Option` for the legacy signature.
        Self::ref_from_bytes(source).ok()
    }
4860
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
    where
        Self: KnownLayout + IntoBytes,
    {
        // Deprecated shim: forwards to the renamed method, collapsing the
        // detailed `CastError` into an `Option` for the legacy signature.
        Self::mut_from_bytes(source).ok()
    }
4871
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
    where
        Self: Sized + Immutable,
    {
        // Deprecated shim: forwards to the renamed method on `[Self]`,
        // collapsing the detailed `CastError` into an `Option`.
        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
    }
4882
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
    where
        Self: Sized + Immutable,
    {
        // Deprecated shim: forwards to the renamed method on `[Self]`,
        // collapsing the detailed `CastError` into an `Option`.
        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
    }
4893
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
    where
        Self: Sized + IntoBytes,
    {
        // Deprecated shim: forwards to the renamed method on `[Self]`,
        // collapsing the detailed `CastError` into an `Option`.
        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
    }
4904
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
    where
        Self: Sized + IntoBytes,
    {
        // Deprecated shim: forwards to the renamed method on `[Self]`,
        // collapsing the detailed `CastError` into an `Option`.
        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
    }
4915
    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
    #[doc(hidden)]
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn read_from(source: &[u8]) -> Option<Self>
    where
        Self: Sized,
    {
        // Deprecated shim: forwards to the renamed method, collapsing the
        // detailed `SizeError` into an `Option` for the legacy signature.
        Self::read_from_bytes(source).ok()
    }
4926}
4927
4928/// Interprets the given affix of the given bytes as a `&Self`.
4929///
4930/// This method computes the largest possible size of `Self` that can fit in the
4931/// prefix or suffix bytes of `source`, then attempts to return both a reference
4932/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
4933/// If there are insufficient bytes, or if that affix of `source` is not
4934/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
    // `meta` selects an exact trailing-element count, while `None` requests
    // the largest `T` that fits; `cast_type` chooses which end of `source`
    // the `T` is taken from. On failure, map the error's stored source
    // pointer back to the caller's plain `&[u8]`.
    let (slf, prefix_suffix) = Ptr::from_ref(source)
        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_ref()))?;
    // `recall_validity` re-derives the validity invariant needed to reborrow
    // the cast pointer as `&T`; the leftover bytes reborrow as `&[u8]`.
    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
}
4946
4947/// Interprets the given affix of the given bytes as a `&mut Self` without
4948/// copying.
4949///
4950/// This method computes the largest possible size of `Self` that can fit in the
4951/// prefix or suffix bytes of `source`, then attempts to return both a reference
4952/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
4953/// If there are insufficient bytes, or if that affix of `source` is not
4954/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
    source: &mut [u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
    // `meta` selects an exact trailing-element count, while `None` requests
    // the largest `T` that fits; `cast_type` chooses which end of `source`
    // the `T` is taken from. The cast is justified by exclusive access to
    // `source` (`BecauseExclusive`). On failure, map the error's stored
    // source pointer back to the caller's plain `&mut [u8]`.
    let (slf, prefix_suffix) = Ptr::from_mut(source)
        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_mut()))?;
    // The turbofish names the proof path `recall_validity` uses to re-derive
    // the validity invariant needed to reborrow as `&mut T`.
    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
}
4966
4967/// Analyzes whether a type is [`IntoBytes`].
4968///
4969/// This derive analyzes, at compile time, whether the annotated type satisfies
4970/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
4971/// sound to do so. This derive can be applied to structs and enums (see below
4972/// for union support); e.g.:
4973///
4974/// ```
4975/// # use zerocopy_derive::{IntoBytes};
4976/// #[derive(IntoBytes)]
4977/// #[repr(C)]
4978/// struct MyStruct {
4979/// # /*
4980/// ...
4981/// # */
4982/// }
4983///
4984/// #[derive(IntoBytes)]
4985/// #[repr(u8)]
4986/// enum MyEnum {
4987/// # Variant,
4988/// # /*
4989/// ...
4990/// # */
4991/// }
4992/// ```
4993///
4994/// [safety conditions]: trait@IntoBytes#safety
4995///
4996/// # Error Messages
4997///
4998/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
4999/// for `IntoBytes` is implemented, you may get an error like this:
5000///
5001/// ```text
5002/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
5003/// --> lib.rs:23:10
5004/// |
5005/// 1 | #[derive(IntoBytes)]
5006/// | ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
5007/// |
5008/// = help: the following implementations were found:
5009/// <() as PaddingFree<T, false>>
5010/// ```
5011///
5012/// This error indicates that the type being annotated has padding bytes, which
5013/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
5014/// fields by using types in the [`byteorder`] module, wrapping field types in
5015/// [`Unalign`], adding explicit struct fields where those padding bytes would
5016/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
5017/// layout] for more information about type layout and padding.
5018///
5019/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
5020///
5021/// # Unions
5022///
5023/// Currently, union bit validity is [up in the air][union-validity], and so
5024/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
5025/// However, implementing `IntoBytes` on a union type is likely sound on all
5026/// existing Rust toolchains - it's just that it may become unsound in the
5027/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
5028/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
5029///
5030/// ```shell
5031/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
5032/// ```
5033///
5034/// However, it is your responsibility to ensure that this derive is sound on
5035/// the specific versions of the Rust toolchain you are using! We make no
5036/// stability or soundness guarantees regarding this cfg, and may remove it at
5037/// any point.
5038///
5039/// We are actively working with Rust to stabilize the necessary language
5040/// guarantees to support this in a forwards-compatible way, which will enable
5041/// us to remove the cfg gate. As part of this effort, we need to know how much
5042/// demand there is for this feature. If you would like to use `IntoBytes` on
5043/// unions, [please let us know][discussion].
5044///
5045/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
5046/// [discussion]: https://github.com/google/zerocopy/discussions/1802
5047///
5048/// # Analysis
5049///
5050/// *This section describes, roughly, the analysis performed by this derive to
5051/// determine whether it is sound to implement `IntoBytes` for a given type.
5052/// Unless you are modifying the implementation of this derive, or attempting to
5053/// manually implement `IntoBytes` for a type yourself, you don't need to read
5054/// this section.*
5055///
5056/// If a type has the following properties, then this derive can implement
5057/// `IntoBytes` for that type:
5058///
5059/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
5060/// - if the type is `repr(transparent)` or `repr(packed)`, it is
5061/// [`IntoBytes`] if its fields are [`IntoBytes`]; else,
5062/// - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
5063/// if its field is [`IntoBytes`]; else,
5064/// - if the type has no generic parameters, it is [`IntoBytes`] if the type
5065/// is sized and has no padding bytes; else,
5066/// - if the type is `repr(C)`, its fields must be [`Unaligned`].
5067/// - If the type is an enum:
5068/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
5069/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
5070/// - It must have no padding bytes.
5071/// - Its fields must be [`IntoBytes`].
5072///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
/// implementation details of this derive.
///
/// [safety conditions]: trait@IntoBytes#safety
/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
5078#[cfg(any(feature = "derive", test))]
5079#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5080pub use zerocopy_derive::IntoBytes;
5081
/// Types that can be converted to an immutable slice of initialized bytes.
///
/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
/// same size. This is useful for efficiently serializing structured data as raw
/// bytes.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(IntoBytes)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(IntoBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `IntoBytes`. See the [derive
/// documentation][derive] for guidance on how to interpret error messages
/// produced by the derive's analysis.
///
/// # Safety
///
/// *This section describes what is required in order for `T: IntoBytes`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `IntoBytes` manually, and you don't plan on writing unsafe code that
/// operates on `IntoBytes` types, then you don't need to read this section.*
///
/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
/// marked as `IntoBytes` which violates this contract, it may cause undefined
/// behavior.
///
/// `#[derive(IntoBytes)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::IntoBytes",
    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
)]
pub unsafe trait IntoBytes {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
    // prevent object safety, but those provide a benefit in exchange for object
    // safety. If at some point we remove those methods, change their type
    // signatures, or move them out of this trait so that `IntoBytes` is object
    // safe again, it's important that this function not prevent object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Gets the bytes of this value.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_bytes(&self) -> &[u8]
    where
        Self: Immutable,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        let slf: *const Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
        //   many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - Since `slf` is derived from `self`, and `self` is an immutable
        //   reference, the only other references to this memory region that
        //   could exist are other immutable references, which by `Self:
        //   Immutable` don't permit mutation.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
    }

    /// Gets the bytes of this value mutably.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Eq, PartialEq, Debug)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_mut_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// bytes.reverse();
    ///
    /// assert_eq!(header, PacketHeader {
    ///     src_port: [7, 6],
    ///     dst_port: [5, 4],
    ///     length: [3, 2],
    ///     checksum: [1, 0],
    /// });
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_mut_bytes(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        let slf: *mut Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
        //   size_of::<u8>()` many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - `Self: FromBytes` ensures that no write to this memory region
        //   could result in it containing an invalid `Self`.
        // - Since `slf` is derived from `self`, and `self` is a mutable
        //   reference, no other references to this memory region can exist.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
    }

    /// Writes a copy of `self` to `dst`.
    ///
    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    ///
    /// If too many or too few target bytes are provided, `write_to` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut excessive_bytes = &mut [0u8; 128][..];
    ///
    /// let write_result = header.write_to(excessive_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(excessive_bytes, [0u8; 128]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        if dst.len() == src.len() {
            // SAFETY: Within this branch of the conditional, we have ensured
            // that `dst.len()` is equal to `src.len()`. Neither the size of the
            // source nor the size of the destination change between the above
            // size check and the invocation of `copy_unchecked`.
            unsafe { util::copy_unchecked(src, dst) }
            Ok(())
        } else {
            Err(SizeError::new(self))
        }
    }

    /// Writes a copy of `self` to the prefix of `dst`.
    ///
    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_prefix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_prefix` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        match dst.get_mut(..src.len()) {
            Some(dst) => {
                // SAFETY: Within this branch of the `match`, we have ensured
                // through fallible subslicing that `dst.len()` is equal to
                // `src.len()`. Neither the size of the source nor the size of
                // the destination change between the above subslicing operation
                // and the invocation of `copy_unchecked`.
                unsafe { util::copy_unchecked(src, dst) }
                Ok(())
            }
            None => Err(SizeError::new(self)),
        }
    }

    /// Writes a copy of `self` to the suffix of `dst`.
    ///
    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_suffix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_suffix` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
            start
        } else {
            return Err(SizeError::new(self));
        };
        let dst = if let Some(dst) = dst.get_mut(start..) {
            dst
        } else {
            // get_mut() should never return None here. We return a `SizeError`
            // rather than .unwrap() because in the event the branch is not
            // optimized away, returning a value is generally lighter-weight
            // than panicking.
            return Err(SizeError::new(self));
        };
        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
        // `dst.len()` is equal to `src.len()`. Neither the size of the source
        // nor the size of the destination change between the above subslicing
        // operation and the invocation of `copy_unchecked`.
        unsafe {
            util::copy_unchecked(src, dst);
        }
        Ok(())
    }

    /// Writes a copy of `self` to an `io::Write`.
    ///
    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
    /// for interfacing with operating system byte sinks (files, sockets, etc.).
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
    /// use std::fs::File;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct GrayscaleImage {
    ///     height: U16,
    ///     width: U16,
    ///     pixels: [U16],
    /// }
    ///
    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
    /// let mut file = File::create("image.bin").unwrap();
    /// image.write_to_io(&mut file).unwrap();
    /// ```
    ///
    /// If the write fails, `write_to_io` returns `Err` and a partial write may
    /// have occurred; e.g.:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    ///
    /// let src = u128::MAX;
    /// let mut dst = [0u8; 2];
    ///
    /// let write_result = src.write_to_io(&mut dst[..]);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(dst, [255, 255]);
    /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
    where
        Self: Immutable,
        W: io::Write,
    {
        dst.write_all(self.as_bytes())
    }

    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
    #[doc(hidden)]
    #[inline]
    fn as_bytes_mut(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        self.as_mut_bytes()
    }
}
5574
/// Analyzes whether a type is [`Unaligned`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `Unaligned` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `Unaligned` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `Unaligned` for that type:
///
/// - If the type is a struct or union:
///   - If `repr(align(N))` is provided, `N` must equal 1.
///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
///     [`Unaligned`].
///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
///     `repr(packed)` or `repr(packed(1))`.
/// - If the type is an enum:
///   - If `repr(align(N))` is provided, `N` must equal 1.
///   - It must be a field-less enum (meaning that all variants have no fields).
///   - It must be `repr(i8)` or `repr(u8)`.
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `Unaligned`, and must *not* rely on the
/// implementation details of this derive.
///
/// [safety conditions]: trait@Unaligned#safety
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;
5637
/// Types with no alignment requirement.
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Unaligned)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Unaligned`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: Unaligned`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `Unaligned` manually, and you don't plan on writing unsafe code that
/// operates on `Unaligned` types, then you don't need to read this section.*
///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
///
/// `#[derive(Unaligned)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Unaligned",
    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
)]
pub unsafe trait Unaligned {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `Unaligned` from being object safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
5716
/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
///
/// This derive can be applied to structs and enums implementing both
/// [`Immutable`] and [`IntoBytes`]; e.g.:
///
/// ```
/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
/// #[derive(ByteEq, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(ByteEq, Immutable, IntoBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
/// equality by individually comparing each field. Instead, the implementation
/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
/// `self` and `other` to byte slices and compares those slices for equality.
/// This may have performance advantages.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::ByteEq;
/// Derives an optimized [`Hash`] implementation.
///
/// This derive can be applied to structs and enums implementing both
/// [`Immutable`] and [`IntoBytes`]; e.g.:
///
/// ```
/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
/// #[derive(ByteHash, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(ByteHash, Immutable, IntoBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
/// individually hashing each field and combining the results. Instead, the
/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hash
/// it in a single call to [`Hasher::write()`]. This may have performance
/// advantages.
///
/// [`Hash`]: core::hash::Hash
/// [`Hash::hash()`]: core::hash::Hash::hash()
/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::ByteHash;
/// Implements [`SplitAt`].
///
/// This derive can be applied to structs whose trailing field is a slice;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::{KnownLayout, SplitAt};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// #   t: [u8],
/// }
/// ```
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::SplitAt;
5805
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
mod alloc_support {
    use super::*;

    /// Grows `v` by appending `additional` new items at its end, with each new
    /// item initialized to all-zeros bytes.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T>(v: &mut Vec<T>, additional: usize) -> Result<(), AllocError>
    where
        T: FromZeros,
    {
        // Delegate to the trait method, which is this function's replacement.
        T::extend_vec_zeroed(v, additional)
    }

    /// Splices `additional` new items into `v` at index `position`, with each
    /// new item initialized to all-zeros bytes.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        T: FromZeros,
    {
        // Delegate to the trait method, which is this function's replacement.
        T::insert_vec_zeroed(v, position, additional)
    }
}
5843
// Re-export the deprecated free functions from `alloc_support` at the crate
// root for backwards compatibility; they are `#[doc(hidden)]` because the
// `FromZeros` methods they delegate to are the supported replacements.
#[cfg(feature = "alloc")]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
#[doc(hidden)]
pub use alloc_support::*;
5848
5849#[cfg(test)]
5850#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5851mod tests {
5852 use static_assertions::assert_impl_all;
5853
5854 use super::*;
5855 use crate::util::testutil::*;
5856
    // An unsized type.
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    //
    // `repr(transparent)` guarantees that `Unsized` has the same layout as its
    // single `[u8]` field, which `from_mut_slice` (below) relies on.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
5864
    impl Unsized {
        /// Converts a mutable byte slice into a mutable `Unsized` wrapping it.
        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This is *probably* sound - since the layouts of `[u8]`
            // and `Unsized` are the same, so are the layouts of `&mut [u8]`
            // and `&mut Unsized`. [1] Even if it turns out that this isn't
            // actually guaranteed by the language spec, we can just change
            // this since it's in test code.
            //
            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
            unsafe { mem::transmute(slc) }
        }
    }
5877
    #[test]
    fn test_known_layout() {
        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
        // of `$ty`.
        macro_rules! test {
            ($ty:ty, $expect:expr) => {
                let expect = $expect;
                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
            };
        }

        // Helper for concisely constructing the expected `DstLayout`: a
        // `trailing_slice_elem_size` of `None` yields a sized layout of size
        // `offset`, while `Some(elem_size)` yields a slice-DST layout.
        let layout =
            |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
                align: NonZeroUsize::new(align).unwrap(),
                size_info: match trailing_slice_elem_size {
                    None => SizeInfo::Sized { size: offset },
                    Some(elem_size) => {
                        SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
                    }
                },
                statically_shallow_unpadded,
            };

        test!((), layout(0, 1, None, false));
        test!(u8, layout(1, 1, None, false));
        // Use `align_of` because `u64` alignment may be smaller than 8 on some
        // platforms.
        test!(u64, layout(8, mem::align_of::<u64>(), None, false));
        test!(AU64, layout(8, 8, None, false));

        // NOTE(review): this asserts that `Option<&'static ()>` has the same
        // layout as `usize` — presumably relying on the null-pointer niche;
        // confirm against the `KnownLayout` impl for `usize`.
        test!(Option<&'static ()>, usize::LAYOUT);

        test!([()], layout(0, 1, Some(0), true));
        test!([u8], layout(0, 1, Some(1), true));
        test!(str, layout(0, 1, Some(1), true));
    }
5917
5918 #[cfg(feature = "derive")]
5919 #[test]
5920 fn test_known_layout_derive() {
5921 // In this and other files (`late_compile_pass.rs`,
5922 // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5923 // modes of `derive(KnownLayout)` for the following combination of
5924 // properties:
5925 //
5926 // +------------+--------------------------------------+-----------+
5927 // | | trailing field properties | |
5928 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5929 // |------------+----------+----------------+----------+-----------|
5930 // | N | N | N | N | KL00 |
5931 // | N | N | N | Y | KL01 |
5932 // | N | N | Y | N | KL02 |
5933 // | N | N | Y | Y | KL03 |
5934 // | N | Y | N | N | KL04 |
5935 // | N | Y | N | Y | KL05 |
5936 // | N | Y | Y | N | KL06 |
5937 // | N | Y | Y | Y | KL07 |
5938 // | Y | N | N | N | KL08 |
5939 // | Y | N | N | Y | KL09 |
5940 // | Y | N | Y | N | KL10 |
5941 // | Y | N | Y | Y | KL11 |
5942 // | Y | Y | N | N | KL12 |
5943 // | Y | Y | N | Y | KL13 |
5944 // | Y | Y | Y | N | KL14 |
5945 // | Y | Y | Y | Y | KL15 |
5946 // +------------+----------+----------------+----------+-----------+
5947
5948 struct NotKnownLayout<T = ()> {
5949 _t: T,
5950 }
5951
5952 #[derive(KnownLayout)]
5953 #[repr(C)]
5954 struct AlignSize<const ALIGN: usize, const SIZE: usize>
5955 where
5956 elain::Align<ALIGN>: elain::Alignment,
5957 {
5958 _align: elain::Align<ALIGN>,
5959 size: [u8; SIZE],
5960 }
5961
5962 type AU16 = AlignSize<2, 2>;
5963 type AU32 = AlignSize<4, 4>;
5964
5965 fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
5966
5967 let sized_layout = |align, size| DstLayout {
5968 align: NonZeroUsize::new(align).unwrap(),
5969 size_info: SizeInfo::Sized { size },
5970 statically_shallow_unpadded: false,
5971 };
5972
5973 let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
5974 align: NonZeroUsize::new(align).unwrap(),
5975 size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5976 statically_shallow_unpadded,
5977 };
5978
5979 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5980 // | N | N | N | Y | KL01 |
5981 #[allow(dead_code)]
5982 #[derive(KnownLayout)]
5983 struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5984
5985 let expected = DstLayout::for_type::<KL01>();
5986
5987 assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
5988 assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
5989
5990 // ...with `align(N)`:
5991 #[allow(dead_code)]
5992 #[derive(KnownLayout)]
5993 #[repr(align(64))]
5994 struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5995
5996 let expected = DstLayout::for_type::<KL01Align>();
5997
5998 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
5999 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6000
6001 // ...with `packed`:
6002 #[allow(dead_code)]
6003 #[derive(KnownLayout)]
6004 #[repr(packed)]
6005 struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6006
6007 let expected = DstLayout::for_type::<KL01Packed>();
6008
6009 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
6010 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
6011
6012 // ...with `packed(N)`:
6013 #[allow(dead_code)]
6014 #[derive(KnownLayout)]
6015 #[repr(packed(2))]
6016 struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6017
6018 assert_impl_all!(KL01PackedN: KnownLayout);
6019
6020 let expected = DstLayout::for_type::<KL01PackedN>();
6021
6022 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
6023 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6024
6025 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6026 // | N | N | Y | Y | KL03 |
6027 #[allow(dead_code)]
6028 #[derive(KnownLayout)]
6029 struct KL03(NotKnownLayout, u8);
6030
6031 let expected = DstLayout::for_type::<KL03>();
6032
6033 assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
6034 assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
6035
6036 // ... with `align(N)`
6037 #[allow(dead_code)]
6038 #[derive(KnownLayout)]
6039 #[repr(align(64))]
6040 struct KL03Align(NotKnownLayout<AU32>, u8);
6041
6042 let expected = DstLayout::for_type::<KL03Align>();
6043
6044 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
6045 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6046
6047 // ... with `packed`:
6048 #[allow(dead_code)]
6049 #[derive(KnownLayout)]
6050 #[repr(packed)]
6051 struct KL03Packed(NotKnownLayout<AU32>, u8);
6052
6053 let expected = DstLayout::for_type::<KL03Packed>();
6054
6055 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
6056 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
6057
6058 // ... with `packed(N)`
6059 #[allow(dead_code)]
6060 #[derive(KnownLayout)]
6061 #[repr(packed(2))]
6062 struct KL03PackedN(NotKnownLayout<AU32>, u8);
6063
6064 assert_impl_all!(KL03PackedN: KnownLayout);
6065
6066 let expected = DstLayout::for_type::<KL03PackedN>();
6067
6068 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
6069 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6070
6071 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6072 // | N | Y | N | Y | KL05 |
6073 #[allow(dead_code)]
6074 #[derive(KnownLayout)]
6075 struct KL05<T>(u8, T);
6076
6077 fn _test_kl05<T>(t: T) -> impl KnownLayout {
6078 KL05(0u8, t)
6079 }
6080
6081 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6082 // | N | Y | Y | Y | KL07 |
6083 #[allow(dead_code)]
6084 #[derive(KnownLayout)]
6085 struct KL07<T: KnownLayout>(u8, T);
6086
6087 fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
6088 let _ = KL07(0u8, t);
6089 }
6090
6091 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6092 // | Y | N | Y | N | KL10 |
6093 #[allow(dead_code)]
6094 #[derive(KnownLayout)]
6095 #[repr(C)]
6096 struct KL10(NotKnownLayout<AU32>, [u8]);
6097
6098 let expected = DstLayout::new_zst(None)
6099 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6100 .extend(<[u8] as KnownLayout>::LAYOUT, None)
6101 .pad_to_align();
6102
6103 assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
6104 assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));
6105
6106 // ...with `align(N)`:
6107 #[allow(dead_code)]
6108 #[derive(KnownLayout)]
6109 #[repr(C, align(64))]
6110 struct KL10Align(NotKnownLayout<AU32>, [u8]);
6111
6112 let repr_align = NonZeroUsize::new(64);
6113
6114 let expected = DstLayout::new_zst(repr_align)
6115 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6116 .extend(<[u8] as KnownLayout>::LAYOUT, None)
6117 .pad_to_align();
6118
6119 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
6120 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));
6121
6122 // ...with `packed`:
6123 #[allow(dead_code)]
6124 #[derive(KnownLayout)]
6125 #[repr(C, packed)]
6126 struct KL10Packed(NotKnownLayout<AU32>, [u8]);
6127
6128 let repr_packed = NonZeroUsize::new(1);
6129
6130 let expected = DstLayout::new_zst(None)
6131 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6132 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6133 .pad_to_align();
6134
6135 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
6136 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));
6137
6138 // ...with `packed(N)`:
6139 #[allow(dead_code)]
6140 #[derive(KnownLayout)]
6141 #[repr(C, packed(2))]
6142 struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
6143
6144 let repr_packed = NonZeroUsize::new(2);
6145
6146 let expected = DstLayout::new_zst(None)
6147 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6148 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6149 .pad_to_align();
6150
6151 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
6152 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6153
6154 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6155 // | Y | N | Y | Y | KL11 |
6156 #[allow(dead_code)]
6157 #[derive(KnownLayout)]
6158 #[repr(C)]
6159 struct KL11(NotKnownLayout<AU64>, u8);
6160
6161 let expected = DstLayout::new_zst(None)
6162 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6163 .extend(<u8 as KnownLayout>::LAYOUT, None)
6164 .pad_to_align();
6165
6166 assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
6167 assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
6168
6169 // ...with `align(N)`:
6170 #[allow(dead_code)]
6171 #[derive(KnownLayout)]
6172 #[repr(C, align(64))]
6173 struct KL11Align(NotKnownLayout<AU64>, u8);
6174
6175 let repr_align = NonZeroUsize::new(64);
6176
6177 let expected = DstLayout::new_zst(repr_align)
6178 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6179 .extend(<u8 as KnownLayout>::LAYOUT, None)
6180 .pad_to_align();
6181
6182 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6183 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6184
6185 // ...with `packed`:
6186 #[allow(dead_code)]
6187 #[derive(KnownLayout)]
6188 #[repr(C, packed)]
6189 struct KL11Packed(NotKnownLayout<AU64>, u8);
6190
6191 let repr_packed = NonZeroUsize::new(1);
6192
6193 let expected = DstLayout::new_zst(None)
6194 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6195 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6196 .pad_to_align();
6197
6198 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6199 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6200
6201 // ...with `packed(N)`:
6202 #[allow(dead_code)]
6203 #[derive(KnownLayout)]
6204 #[repr(C, packed(2))]
6205 struct KL11PackedN(NotKnownLayout<AU64>, u8);
6206
6207 let repr_packed = NonZeroUsize::new(2);
6208
6209 let expected = DstLayout::new_zst(None)
6210 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6211 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6212 .pad_to_align();
6213
6214 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6215 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6216
6217 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6218 // | Y | Y | Y | N | KL14 |
6219 #[allow(dead_code)]
6220 #[derive(KnownLayout)]
6221 #[repr(C)]
6222 struct KL14<T: ?Sized + KnownLayout>(u8, T);
6223
6224 fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6225 _assert_kl(kl)
6226 }
6227
6228 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6229 // | Y | Y | Y | Y | KL15 |
6230 #[allow(dead_code)]
6231 #[derive(KnownLayout)]
6232 #[repr(C)]
6233 struct KL15<T: KnownLayout>(u8, T);
6234
6235 fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6236 let _ = KL15(0u8, t);
6237 }
6238
6239 // Test a variety of combinations of field types:
6240 // - ()
6241 // - u8
6242 // - AU16
6243 // - [()]
6244 // - [u8]
6245 // - [AU16]
6246
6247 #[allow(clippy::upper_case_acronyms, dead_code)]
6248 #[derive(KnownLayout)]
6249 #[repr(C)]
6250 struct KLTU<T, U: ?Sized>(T, U);
6251
6252 assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6253
6254 assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6255
6256 assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6257
6258 assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));
6259
6260 assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));
6261
6262 assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));
6263
6264 assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6265
6266 assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6267
6268 assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6269
6270 assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));
6271
6272 assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6273
6274 assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6275
6276 assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6277
6278 assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6279
6280 assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6281
6282 assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));
6283
6284 assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));
6285
6286 assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6287
6288 // Test a variety of field counts.
6289
6290 #[derive(KnownLayout)]
6291 #[repr(C)]
6292 struct KLF0;
6293
6294 assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6295
6296 #[derive(KnownLayout)]
6297 #[repr(C)]
6298 struct KLF1([u8]);
6299
6300 assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));
6301
6302 #[derive(KnownLayout)]
6303 #[repr(C)]
6304 struct KLF2(NotKnownLayout<u8>, [u8]);
6305
6306 assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6307
6308 #[derive(KnownLayout)]
6309 #[repr(C)]
6310 struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6311
6312 assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6313
6314 #[derive(KnownLayout)]
6315 #[repr(C)]
6316 struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6317
6318 assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
6319 }
6320
6321 #[test]
6322 fn test_object_safety() {
6323 fn _takes_immutable(_: &dyn Immutable) {}
6324 fn _takes_unaligned(_: &dyn Unaligned) {}
6325 }
6326
6327 #[test]
6328 fn test_from_zeros_only() {
6329 // Test types that implement `FromZeros` but not `FromBytes`.
6330
6331 assert!(!bool::new_zeroed());
6332 assert_eq!(char::new_zeroed(), '\0');
6333
6334 #[cfg(feature = "alloc")]
6335 {
6336 assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6337 assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6338
6339 assert_eq!(
6340 <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6341 [false, false, false]
6342 );
6343 assert_eq!(
6344 <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6345 ['\0', '\0', '\0']
6346 );
6347
6348 assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6349 assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6350 }
6351
6352 let mut string = "hello".to_string();
6353 let s: &mut str = string.as_mut();
6354 assert_eq!(s, "hello");
6355 s.zero();
6356 assert_eq!(s, "\0\0\0\0\0");
6357 }
6358
    #[test]
    fn test_zst_count_preserved() {
        // Test that, when an explicit count is provided for a type with a
        // ZST trailing slice element, that count is preserved. This is
        // important since, for such types, all element counts result in objects
        // of the same size, and so the correct behavior is ambiguous. However,
        // preserving the count as requested by the user is the behavior that we
        // document publicly.

        // `FromZeros` allocating constructors must honor the requested count.
        #[cfg(feature = "alloc")]
        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
        #[cfg(feature = "alloc")]
        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);

        // `FromBytes` conversion methods must honor the requested count. An
        // empty byte slice suffices as input since `[()]` occupies zero bytes
        // regardless of its length.
        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
    }
6382
    #[test]
    fn test_read_write() {
        // `VAL` fits in 32 bits, so the upper four bytes of its 8-byte
        // encoding are zero on both endiannesses.
        const VAL: u64 = 0x12345678;
        #[cfg(target_endian = "big")]
        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
        #[cfg(target_endian = "little")]
        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
        const ZEROS: [u8; 8] = [0u8; 8];

        // Test `FromBytes::{read_from_bytes, read_from_prefix,
        // read_from_suffix}`.

        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
        // zeros. Note the tuple orders: `read_from_prefix` returns
        // `(value, remaining suffix)` while `read_from_suffix` returns
        // `(remaining prefix, value)`.
        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
        // The first 8 bytes are all zeros and the second 8 bytes are from
        // `VAL_BYTES`
        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));

        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.

        // An exact-size destination is fully overwritten.
        let mut bytes = [0u8; 8];
        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
        assert_eq!(bytes, VAL_BYTES);
        // `write_to_prefix` writes only the first 8 bytes; the trailing bytes
        // remain zero...
        let mut bytes = [0u8; 16];
        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
        assert_eq!(bytes, want);
        // ...and `write_to_suffix` writes only the last 8 bytes; the leading
        // bytes remain zero.
        let mut bytes = [0u8; 16];
        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
        assert_eq!(bytes, want);
    }
6420
    #[test]
    #[cfg(feature = "std")]
    fn test_read_io_with_padding_soundness() {
        // This test is designed to exhibit potential UB in
        // `FromBytes::read_from_io`. (see #2319, #2320).

        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
        // will have inter-field padding between `x` and `y`.
        #[derive(FromBytes)]
        #[repr(C)]
        struct WithPadding {
            x: u8,
            y: u16,
        }
        // A `Read` impl whose behavior depends on the existing contents of
        // `buf`. If `read_from_io` were to pass a buffer containing
        // uninitialized (padding) bytes to `read`, the branches below would
        // read those bytes — UB that tools like Miri can detect.
        struct ReadsInRead;
        impl std::io::Read for ReadsInRead {
            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
                // This body branches on every byte of `buf`, ensuring that it
                // exhibits UB if any byte of `buf` is uninitialized.
                if buf.iter().all(|&x| x == 0) {
                    Ok(buf.len())
                } else {
                    buf.iter_mut().for_each(|x| *x = 0);
                    Ok(buf.len())
                }
            }
        }
        // In either branch, `buf` ends up all-zero, so a sound
        // `read_from_io` must yield an all-zero `WithPadding`.
        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
    }
6450
6451 #[test]
6452 #[cfg(feature = "std")]
6453 fn test_read_write_io() {
6454 let mut long_buffer = [0, 0, 0, 0];
6455 assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6456 assert_eq!(long_buffer, [255, 255, 0, 0]);
6457 assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6458
6459 let mut short_buffer = [0, 0];
6460 assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6461 assert_eq!(short_buffer, [255, 255]);
6462 assert!(u32::read_from_io(&short_buffer[..]).is_err());
6463 }
6464
    #[test]
    fn test_try_from_bytes_try_read_from() {
        // Smoke tests for `TryFromBytes::{try_read_from_bytes,
        // try_read_from_prefix, try_read_from_suffix}`.

        // `0` and `1` are valid `bool` byte representations.
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));

        // Prefix/suffix variants return the unconsumed remainder alongside
        // the parsed value.
        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));

        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));

        // If we don't pass enough bytes, it fails.
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
            Err(TryReadError::Size(_))
        ));

        // If we pass too many bytes, it fails. (Only the exact-size variant
        // rejects surplus bytes; the prefix/suffix variants above accept
        // them.)
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
            Err(TryReadError::Size(_))
        ));

        // If we pass an invalid value, it fails.
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
            Err(TryReadError::Validity(_))
        ));

        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
        // alignment is 8, and since we read from two adjacent addresses one
        // byte apart, it is guaranteed that at least one of them (though
        // possibly both) will be misaligned.
        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
            Ok((AU64(0), &[][..]))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
            Ok((AU64(0), &[][..]))
        );

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
            Ok((&[][..], AU64(0)))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
            Ok((&[][..], AU64(0)))
        );
    }
6536
    #[test]
    fn test_ref_from_mut_from_bytes() {
        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,_suffix}`
        // success cases. Exhaustive coverage for these methods is covered by
        // the `Ref` tests above, which these helper methods defer to.
        //
        // NOTE: the steps below mutate `buf` in sequence, so the expected
        // values in each assertion depend on all preceding mutations.

        // Buffer holds bytes 0..=15 and is 8-aligned via `Align<_, AU64>`.
        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with `1`s (all bytes of this constant are
        // `0x01`, so the result is endian-independent).
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // Overwrite bytes 8..16 with `2`s; the prefix here is bytes 1..8.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // Set byte 6 (the first byte of the 10-byte suffix) to 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Set byte 1 to 30 via a mutable two-byte prefix.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
6571
6572 #[test]
6573 fn test_ref_from_mut_from_bytes_error() {
6574 // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
6575 // error cases.
6576
6577 // Fail because the buffer is too large.
6578 let mut buf = Align::<[u8; 16], AU64>::default();
6579 // `buf.t` should be aligned to 8, so only the length check should fail.
6580 assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6581 assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6582 assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6583 assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6584
6585 // Fail because the buffer is too small.
6586 let mut buf = Align::<[u8; 4], AU64>::default();
6587 assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6588 assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6589 assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6590 assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6591 assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6592 assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6593 assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6594 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6595 assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6596 assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6597 assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6598 assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6599
6600 // Fail because the alignment is insufficient.
6601 let mut buf = Align::<[u8; 13], AU64>::default();
6602 assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6603 assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6604 assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6605 assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6606 assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6607 assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6608 assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6609 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6610 }
6611
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Flip the byte back so subsequent assertions see the original
            // value.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        // `Option<NonZeroU32>` has the same layout as `u32` with `None`
        // represented as all-zero bytes, so `c: None` encodes as four zeros.
        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping byte 0 flips `a`'s least-significant byte on
        // little-endian targets and its most-significant byte on big-endian
        // targets.
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise the unsized (`?Sized`) code path via `Unsized`.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
6705
6706 #[test]
6707 fn test_array() {
6708 #[derive(FromBytes, IntoBytes, Immutable)]
6709 #[repr(C)]
6710 struct Foo {
6711 a: [u16; 33],
6712 }
6713
6714 let foo = Foo { a: [0xFFFF; 33] };
6715 let expected = [0xFFu8; 66];
6716 assert_eq!(foo.as_bytes(), &expected[..]);
6717 }
6718
6719 #[test]
6720 fn test_new_zeroed() {
6721 assert!(!bool::new_zeroed());
6722 assert_eq!(u64::new_zeroed(), 0);
6723 // This test exists in order to exercise unsafe code, especially when
6724 // running under Miri.
6725 #[allow(clippy::unit_cmp)]
6726 {
6727 assert_eq!(<()>::new_zeroed(), ());
6728 }
6729 }
6730
    #[test]
    fn test_transparent_packed_generic_struct() {
        // `repr(transparent)` structs take their layout from their single
        // non-ZST field, so `Foo<T>` should implement these traits whenever
        // `T` does; the `PhantomData` field is a ZST and does not affect
        // layout.
        #[derive(IntoBytes, FromBytes, Unaligned)]
        #[repr(transparent)]
        #[allow(dead_code)] // We never construct this type
        struct Foo<T> {
            _t: T,
            _phantom: PhantomData<()>,
        }

        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
        assert_impl_all!(Foo<u8>: Unaligned);

        // `repr(C, packed)` lowers the struct's alignment to 1, so `Bar`
        // should be `Unaligned` even when a field type (`AU64`) has a higher
        // alignment requirement.
        #[derive(IntoBytes, FromBytes, Unaligned)]
        #[repr(C, packed)]
        #[allow(dead_code)] // We never construct this type
        struct Bar<T, U> {
            _t: T,
            _u: U,
        }

        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
    }
6754
    #[cfg(feature = "alloc")]
    mod alloc {
        use super::*;

        // NOTE(review): per its name, this cfg appears to skip the test on
        // toolchains predating `Vec::try_reserve` (Rust 1.57.0) — confirm
        // against whatever sets the cfg.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed() {
            // Test extending when there is an existing allocation.
            let mut v = vec![100u16, 200, 300];
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
            drop(v);

            // Test extending when there is no existing allocation.
            let mut v: Vec<u64> = Vec::new();
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 3);
            assert_eq!(&*v, &[0, 0, 0]);
            drop(v);
        }

        // Same as `test_extend_vec_zeroed`, but with a zero-sized element
        // type, exercising the ZST code path.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed_zst() {
            // Test extending when there is an existing (fake) allocation.
            let mut v = vec![(), (), ()];
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[(), (), (), (), (), ()]);
            drop(v);

            // Test extending when there is no existing (fake) allocation.
            let mut v: Vec<()> = Vec::new();
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(&*v, &[(), (), ()]);
            drop(v);
        }

        // `insert_vec_zeroed` must handle insertion at the start, middle, and
        // end, with and without a pre-existing allocation.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed() {
            // Insert at start (no existing allocation).
            let mut v: Vec<u64> = Vec::new();
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[0, 0]);
            drop(v);

            // Insert at start.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
            drop(v);

            // Insert at middle.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 0, 200, 300]);
            drop(v);

            // Insert at end.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 200, 300, 0]);
            drop(v);
        }

        // ZST counterpart of `test_insert_vec_zeroed`.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed_zst() {
            // Insert at start (no existing fake allocation).
            let mut v: Vec<()> = Vec::new();
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[(), ()]);
            drop(v);

            // Insert at start.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[(), (), (), (), ()]);
            drop(v);

            // Insert at middle.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);

            // Insert at end.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);
        }

        // A zeroed `u64` in a fresh heap allocation is `0`.
        #[test]
        fn test_new_box_zeroed() {
            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
        }

        // Smoke test: a sizable zeroed boxed array can be constructed and
        // dropped; the value itself is discarded.
        #[test]
        fn test_new_box_zeroed_array() {
            drop(<[u32; 0x1000]>::new_box_zeroed());
        }

        #[test]
        fn test_new_box_zeroed_zst() {
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
            }
        }

        // A zeroed boxed slice has the requested length, all-zero contents,
        // and is mutable.
        #[test]
        fn test_new_box_zeroed_with_elems() {
            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            assert_eq!(&*s, &[0, 0, 0]);
            s[1] = 3;
            assert_eq!(&*s, &[0, 3, 0]);
        }

        // A zero-element request yields an empty boxed slice.
        #[test]
        fn test_new_box_zeroed_with_elems_empty() {
            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        // The requested element count is preserved for ZST elements, where
        // every count produces the same (zero) allocation size.
        #[test]
        fn test_new_box_zeroed_with_elems_zst() {
            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            assert!(s.get(10).is_none());
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(s[1], ());
            }
            s[2] = ();
        }

        #[test]
        fn test_new_box_zeroed_with_elems_zst_empty() {
            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        // Element counts whose total byte size overflows `usize` or exceeds
        // `isize::MAX` must be reported as `AllocError` rather than panicking.
        #[test]
        fn new_box_zeroed_with_elems_errors() {
            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));

            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
            assert_eq!(
                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
                Err(AllocError)
            );
        }
    }
6924
#[test]
#[allow(deprecated)]
fn test_deprecated_from_bytes() {
    // Smoke-test the deprecated zerocopy-0.7-era conversion names so the
    // backwards-compatibility shims keep resolving and behaving.
    let val = 0u32;
    let bytes = val.as_bytes();

    assert!(u32::ref_from(bytes).is_some());

    // `mut_from` needs a mutable byte view, so use a second value.
    let mut val = 0u32;
    let mut_bytes = val.as_mut_bytes();
    assert!(u32::mut_from(mut_bytes).is_some());

    assert!(u32::read_from(bytes).is_some());

    // Splitting off a zero-element slice from either end leaves all four
    // bytes in the remainder.
    let (prefix, remainder) = <u32>::slice_from_prefix(bytes, 0).unwrap();
    assert!(prefix.is_empty());
    assert_eq!(remainder.len(), 4);

    let (remainder, suffix) = <u32>::slice_from_suffix(bytes, 0).unwrap();
    assert!(suffix.is_empty());
    assert_eq!(remainder.len(), 4);

    let (prefix, remainder) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
    assert!(prefix.is_empty());
    assert_eq!(remainder.len(), 4);

    let (remainder, suffix) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
    assert!(suffix.is_empty());
    assert_eq!(remainder.len(), 4);
}
6955
#[test]
fn test_try_ref_from_prefix_suffix() {
    use crate::util::testutil::Align;
    // A four-byte, u32-aligned buffer converts exactly from either end,
    // leaving an empty remainder.
    let buf = Align::<[u8; 4], u32>::new([0u8; 4]);
    let bytes = &buf.t[..];

    let (value, remainder): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
    assert_eq!(*value, 0);
    assert!(remainder.is_empty());

    let (remainder, value): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
    assert_eq!(*value, 0);
    assert!(remainder.is_empty());
}
6968
#[test]
fn test_raw_dangling() {
    use crate::util::AsAddress;
    // Both the sized and the slice form of `raw_dangling` produce a
    // non-null pointer whose address is 1.
    let sized: NonNull<u32> = u32::raw_dangling();
    assert_eq!(AsAddress::addr(sized), 1);

    let slice: NonNull<[u32]> = <[u32]>::raw_dangling();
    assert_eq!(AsAddress::addr(slice), 1);
}
6978
#[test]
fn test_try_ref_from_prefix_with_elems() {
    use crate::util::testutil::Align;
    // Eight aligned zero bytes hold exactly two `u32` elements, so a
    // two-element prefix consumes the entire buffer.
    let buf = Align::<[u8; 8], u32>::new([0u8; 8]);
    let bytes = &buf.t[..];
    let (slice, remainder): (&[u32], &[u8]) =
        <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
    assert_eq!(slice.len(), 2);
    assert!(remainder.is_empty());
}
6987
#[test]
fn test_try_ref_from_suffix_with_elems() {
    use crate::util::testutil::Align;
    // Mirror of the prefix test: a two-element `u32` suffix consumes all
    // eight aligned bytes, leaving no remainder.
    let buf = Align::<[u8; 8], u32>::new([0u8; 8]);
    let bytes = &buf.t[..];
    let (remainder, slice): (&[u8], &[u32]) =
        <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
    assert_eq!(slice.len(), 2);
    assert!(remainder.is_empty());
}
6996}