diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index bdbc59821de2f..d33efe1be6b9e 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -172,7 +172,8 @@ impl<T> TypedArena<T> { fn can_allocate(&self, additional: usize) -> bool { // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). - let available_bytes = self.end.get().addr() - self.ptr.get().addr(); + let available_bytes = + self.end.get().addr_without_provenance() - self.ptr.get().addr_without_provenance(); let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap(); available_bytes >= additional_bytes } @@ -245,7 +246,8 @@ impl<T> TypedArena<T> { if mem::needs_drop::<T>() { // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). - let used_bytes = self.ptr.get().addr() - last_chunk.start().addr(); + let used_bytes = self.ptr.get().addr_without_provenance() + - last_chunk.start().addr_without_provenance(); last_chunk.entries = used_bytes / mem::size_of::<T>(); } @@ -271,9 +273,9 @@ impl<T> TypedArena<T> { // chunks. fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) { // Determine how much was filled. - let start = last_chunk.start().addr(); + let start = last_chunk.start().addr_without_provenance(); // We obtain the value of the pointer to the first uninitialized element. - let end = self.ptr.get().addr(); + let end = self.ptr.get().addr_without_provenance(); // We then calculate the number of elements to be dropped in the last chunk, // which is the filled area's length. let diff = if mem::size_of::<T>() == 0 { @@ -396,11 +398,11 @@ impl DroplessArena { self.start.set(chunk.start()); // Align the end to DROPLESS_ALIGNMENT. - let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT); + let end = align_down(chunk.end().addr_without_provenance(), DROPLESS_ALIGNMENT); // Make sure we don't go past `start`. This should not happen since the allocation // should be at least DROPLESS_ALIGNMENT - 1 bytes. - debug_assert!(chunk.start().addr() <= end); + debug_assert!(chunk.start().addr_without_provenance() <= end); self.end.set(chunk.end().with_addr(end)); @@ -415,9 +417,9 @@ impl DroplessArena { // This loop executes once or twice: if allocation fails the first // time, the `grow` ensures it will succeed the second time. loop { - let start = self.start.get().addr(); + let start = self.start.get().addr_without_provenance(); let old_end = self.end.get(); - let end = old_end.addr(); + let end = old_end.addr_without_provenance(); // Align allocated bytes so that `self.end` stays aligned to // DROPLESS_ALIGNMENT.
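The arena arithmetic above only ever subtracts the addresses of two pointers into the same chunk, which is why a provenance-free integer is all it needs. A minimal sketch of the same pattern, written against the current `addr()` name that this diff renames to `addr_without_provenance()`, with the `offset_from` alternative from the FIXME comments shown for contrast:

```rust
use std::mem;

fn main() {
    let chunk = [0u64; 32];
    let ptr = chunk.as_ptr(); // the bump pointer, as in `TypedArena::ptr`
    let end = chunk.as_ptr_range().end; // one past the end, as in `TypedArena::end`

    // Provenance-free arithmetic, as in `can_allocate`: only the numeric
    // addresses are compared; no pointer is ever rebuilt from the result.
    let available_bytes = end.addr() - ptr.addr();
    assert_eq!(available_bytes / mem::size_of::<u64>(), 32);

    // The alternative named in the FIXMEs: `offset_from` counts elements
    // directly, but requires both pointers to come from the same allocation.
    assert_eq!(unsafe { end.offset_from(ptr) }, 32);
}
```

Both computations agree whenever the pointers really do point into one allocation; `offset_from` merely makes that a hard requirement, which is what the FIXMEs want verified under Miri.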
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs index 295e27691090c..7495ed5f73381 100644 --- a/compiler/rustc_codegen_ssa/src/mono_item.rs +++ b/compiler/rustc_codegen_ssa/src/mono_item.rs @@ -138,7 +138,11 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> { fn to_raw_string(&self) -> String { match *self { MonoItem::Fn(instance) => { - format!("Fn({:?}, {})", instance.def, instance.args.as_ptr().addr()) + format!( + "Fn({:?}, {})", + instance.def, + instance.args.as_ptr().addr_without_provenance() + ) } MonoItem::Static(id) => format!("Static({id:?})"), MonoItem::GlobalAsm(id) => format!("GlobalAsm({id:?})"), diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs index ff4208def319d..2e64cab617b70 100644 --- a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs +++ b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs @@ -104,7 +104,7 @@ where #[inline] pub fn tag(&self) -> T { // Unpack the tag, according to the `self.packed` encoding scheme - let tag = self.packed.addr().get() >> Self::TAG_BIT_SHIFT; + let tag = self.packed.addr_without_provenance().get() >> Self::TAG_BIT_SHIFT; // Safety: // The shift retrieves the original value from `T::into_usize`, diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl index 220da19a29dc8..5f0e8938277ab 100644 --- a/compiler/rustc_hir_typeck/messages.ftl +++ b/compiler/rustc_hir_typeck/messages.ftl @@ -90,8 +90,8 @@ hir_typeck_lossy_provenance_int2ptr = hir_typeck_lossy_provenance_ptr2int = under strict provenance it is considered bad style to cast pointer `{$expr_ty}` to integer `{$cast_ty}` - .suggestion = use `.addr()` to obtain the address of a pointer - .help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead + .suggestion = use `.addr_without_provenance()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead + .help = if you need to cast the address back to a pointer later, use `.expose_addr()` instead hir_typeck_method_call_on_unknown_raw_pointee = cannot call a method on a raw pointer with an unknown pointee type diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs index f609d0f7e8f5d..ccd21c7e06fb5 100644 --- a/compiler/rustc_hir_typeck/src/errors.rs +++ b/compiler/rustc_hir_typeck/src/errors.rs @@ -256,7 +256,7 @@ pub enum LossyProvenancePtr2IntSuggestion<'tcx> { NeedsParensCast { #[suggestion_part(code = "(")] expr_span: Span, - #[suggestion_part(code = ").addr() as {cast_ty}")] + #[suggestion_part(code = ").addr_without_provenance() as {cast_ty}")] cast_span: Span, cast_ty: Ty<'tcx>, }, @@ -264,12 +264,12 @@ pub enum LossyProvenancePtr2IntSuggestion<'tcx> { NeedsParens { #[suggestion_part(code = "(")] expr_span: Span, - #[suggestion_part(code = ").addr()")] + #[suggestion_part(code = ").addr_without_provenance()")] cast_span: Span, }, #[suggestion( hir_typeck_suggestion, - code = ".addr() as {cast_ty}", + code = ".addr_without_provenance() as {cast_ty}", applicability = "maybe-incorrect" )] NeedsCast { @@ -277,7 +277,11 @@ pub enum LossyProvenancePtr2IntSuggestion<'tcx> { cast_span: Span, cast_ty: Ty<'tcx>, }, - #[suggestion(hir_typeck_suggestion, code = ".addr()", applicability = "maybe-incorrect")] + #[suggestion( + hir_typeck_suggestion, + code
= ".addr_without_provenance()", + applicability = "maybe-incorrect" + )] Other { #[primary_span] cast_span: Span, diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs index 813a7a64daf00..8f8f98a50ded7 100644 --- a/compiler/rustc_middle/src/ty/generic_args.rs +++ b/compiler/rustc_middle/src/ty/generic_args.rs @@ -149,7 +149,7 @@ impl<'tcx> GenericArg<'tcx> { // pointers were originally created from `Interned` types in `pack()`, // and this is just going in the other direction. unsafe { - match self.ptr.addr().get() & TAG_MASK { + match self.ptr.addr_without_provenance().get() & TAG_MASK { REGION_TAG => GenericArgKind::Lifetime(ty::Region(Interned::new_unchecked( ptr.cast::>().as_ref(), ))), diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 30409e990e13c..717683af321df 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -623,7 +623,7 @@ impl<'tcx> Term<'tcx> { // pointers were originally created from `Interned` types in `pack()`, // and this is just going in the other direction. unsafe { - match self.ptr.addr().get() & TAG_MASK { + match self.ptr.addr_without_provenance().get() & TAG_MASK { TYPE_TAG => TermKind::Ty(Ty(Interned::new_unchecked( ptr.cast::>>().as_ref(), ))), diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index 084157b97ab41..152a553d77ff3 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -2840,7 +2840,7 @@ impl Weak { } pub(crate) fn is_dangling(ptr: *const T) -> bool { - (ptr.cast::<()>()).addr() == usize::MAX + (ptr.cast::<()>()).addr_without_provenance() == usize::MAX } /// Helper type to allow accessing the reference counts without diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index dfd42ca06193a..714f1fc296421 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -222,7 +222,9 @@ impl Iterator for IntoIter { #[inline] fn size_hint(&self) -> (usize, Option) { let exact = if T::IS_ZST { - self.end.addr().wrapping_sub(self.ptr.as_ptr().addr()) + self.end + .addr_without_provenance() + .wrapping_sub(self.ptr.as_ptr().addr_without_provenance()) } else { unsafe { non_null!(self.end, T).sub_ptr(self.ptr) } }; diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs index 153971a59c5c9..30b6bc9b84a8a 100644 --- a/library/core/src/hash/mod.rs +++ b/library/core/src/hash/mod.rs @@ -955,7 +955,7 @@ mod impls { #[inline] fn hash(&self, state: &mut H) { let (address, metadata) = self.to_raw_parts(); - state.write_usize(address.addr()); + state.write_usize(address.addr_without_provenance()); metadata.hash(state); } } @@ -965,7 +965,7 @@ mod impls { #[inline] fn hash(&self, state: &mut H) { let (address, metadata) = self.to_raw_parts(); - state.write_usize(address.addr()); + state.write_usize(address.addr_without_provenance()); metadata.hash(state); } } diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index f9d89795a9988..275e63575d30a 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -1254,7 +1254,7 @@ extern "rust-intrinsic" { /// - If the code just wants to store data of arbitrary type in some buffer and needs to pick a /// type for that buffer, it can use [`MaybeUninit`][crate::mem::MaybeUninit]. /// - If the code actually wants to work on the address the pointer points to, it can use `as` - /// casts or [`ptr.addr()`][pointer::addr]. 
+ /// casts or [`ptr.addr_without_provenance()`][pointer::addr_without_provenance]. /// /// Turning a `*mut T` into an `&mut T`: /// @@ -2760,8 +2760,8 @@ pub(crate) fn is_valid_allocation_size(size: usize, len: usize) -> bool { /// `count * size` do *not* overlap. #[inline] pub(crate) fn is_nonoverlapping(src: *const (), dst: *const (), size: usize, count: usize) -> bool { - let src_usize = src.addr(); - let dst_usize = dst.addr(); + let src_usize = src.addr_without_provenance(); + let dst_usize = dst.addr_without_provenance(); let Some(size) = size.checked_mul(count) else { crate::panicking::panic_nounwind( "is_nonoverlapping: `size_of::() * count` overflows a usize", diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 85a56d37ab75c..5020bc1bc94d9 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -35,7 +35,7 @@ impl *const T { pub const fn is_null(self) -> bool { #[inline] fn runtime_impl(ptr: *const u8) -> bool { - ptr.addr() == 0 + ptr.addr_without_provenance() == 0 } #[inline] @@ -203,7 +203,7 @@ impl *const T { #[must_use] #[inline(always)] #[unstable(feature = "strict_provenance", issue = "95228")] - pub fn addr(self) -> usize { + pub fn addr_without_provenance(self) -> usize { // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the // provenance). @@ -223,7 +223,7 @@ impl *const T { /// Provenance][super#strict-provenance] rules. Supporting /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by /// tools that help you to stay conformant with the Rust memory model, so it is recommended to - /// use [`addr`][pointer::addr] wherever possible. + /// use [`addr_without_provenance`][pointer::addr_without_provenance] wherever possible. /// /// On most platforms this will produce a value with the same bytes as the original pointer, /// because all the bytes are dedicated to describing the address. Platforms which need to store @@ -264,7 +264,7 @@ impl *const T { // In the mean-time, this operation is defined to be "as if" it was // a wrapping_offset, so we can emulate it as such. This should properly // restore pointer provenance even under today's compiler. - let self_addr = self.addr() as isize; + let self_addr = self.addr_without_provenance() as isize; let dest_addr = addr as isize; let offset = dest_addr.wrapping_sub(self_addr); @@ -282,7 +282,7 @@ impl *const T { #[inline] #[unstable(feature = "strict_provenance", issue = "95228")] pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self { - self.with_addr(f(self.addr())) + self.with_addr(f(self.addr_without_provenance())) } /// Decompose a (possibly wide) pointer into its data pointer and metadata components. @@ -592,7 +592,7 @@ impl *const T { /// let tagged_ptr = ptr.map_addr(|a| a | 0b10); /// /// // Get the "tag" back - /// let tag = tagged_ptr.addr() & tag_mask; + /// let tag = tagged_ptr.addr_without_provenance() & tag_mask; /// assert_eq!(tag, 0b10); /// /// // Note that `tagged_ptr` is unaligned, it's UB to read from it. @@ -664,7 +664,7 @@ impl *const T { /// runtime and may be exploited by optimizations. If you wish to compute the difference between /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - /// origin as isize) / mem::size_of::()`. - // FIXME: recommend `addr()` instead of `as usize` once that is stable. 
+ // FIXME: recommend `addr_without_provenance()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add /// [allocated object]: crate::ptr#allocated-object @@ -1611,7 +1611,7 @@ impl *const T { #[inline] fn runtime_impl(ptr: *const (), align: usize) -> bool { - ptr.addr() & (align - 1) == 0 + ptr.addr_without_provenance() & (align - 1) == 0 } #[inline] diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index fc5b08c9801a8..23d797762eba2 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -239,7 +239,7 @@ //! let tagged = ptr.map_addr(|addr| addr | HAS_DATA); //! //! // Check the flag: -//! if tagged.addr() & HAS_DATA != 0 { +//! if tagged.addr_without_provenance() & HAS_DATA != 0 { //! // Untag and read the pointer //! let data = *tagged.map_addr(|addr| addr & FLAG_MASK); //! assert_eq!(data, 17); @@ -372,7 +372,7 @@ //! [`wrapping_offset`]: pointer::wrapping_offset //! [`with_addr`]: pointer::with_addr //! [`map_addr`]: pointer::map_addr -//! [`addr`]: pointer::addr +//! [`addr_without_provenance`]: pointer::addr_without_provenance //! [`ptr::dangling`]: core::ptr::dangling //! [`expose_addr`]: pointer::expose_addr //! [`from_exposed_addr`]: from_exposed_addr diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index 28ba26f5c16c4..bb554a0bde2ff 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -35,7 +35,7 @@ impl *mut T { pub const fn is_null(self) -> bool { #[inline] fn runtime_impl(ptr: *mut u8) -> bool { - ptr.addr() == 0 + ptr.addr_without_provenance() == 0 } #[inline] @@ -211,7 +211,7 @@ impl *mut T { #[must_use] #[inline(always)] #[unstable(feature = "strict_provenance", issue = "95228")] - pub fn addr(self) -> usize { + pub fn addr_without_provenance(self) -> usize { // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the // provenance). @@ -231,7 +231,7 @@ impl *mut T { /// Provenance][super#strict-provenance] rules. Supporting /// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported /// by tools that help you to stay conformant with the Rust memory model, so it is recommended - /// to use [`addr`][pointer::addr] wherever possible. + /// to use [`addr_without_provenance`][pointer::addr_without_provenance] wherever possible. /// /// On most platforms this will produce a value with the same bytes as the original pointer, /// because all the bytes are dedicated to describing the address. Platforms which need to store @@ -272,7 +272,7 @@ impl *mut T { // In the mean-time, this operation is defined to be "as if" it was // a wrapping_offset, so we can emulate it as such. This should properly // restore pointer provenance even under today's compiler. - let self_addr = self.addr() as isize; + let self_addr = self.addr_without_provenance() as isize; let dest_addr = addr as isize; let offset = dest_addr.wrapping_sub(self_addr); @@ -290,7 +290,7 @@ impl *mut T { #[inline] #[unstable(feature = "strict_provenance", issue = "95228")] pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self { - self.with_addr(f(self.addr())) + self.with_addr(f(self.addr_without_provenance())) } /// Decompose a (possibly wide) pointer into its data pointer and metadata components. 
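The diagnostics and doc changes above all teach the same round trip: an address on its own can never become a usable pointer again; provenance has to be re-attached through a pointer that still carries it, via `with_addr` or `map_addr`. A minimal sketch of that round trip, again written against the current `addr()`/`with_addr`/`map_addr` names rather than the renamed one:

```rust
fn main() {
    let x = 42u32;
    let p: *const u32 = &x;

    // Pointer to integer: the bare address, provenance dropped.
    let addr = p.addr();

    // Integer back to pointer: `with_addr` reuses the provenance of `p`,
    // so the result may be dereferenced again.
    let q = p.with_addr(addr);
    assert_eq!(unsafe { *q }, 42);

    // `map_addr` is the same idea in one step, here aligning downwards.
    let aligned = p.map_addr(|a| a & !0b11);
    assert!(aligned.addr() % 4 == 0);
}
```

Only when the integer must stand alone, with no originating pointer in reach, does the updated help text point at `expose_addr()` instead.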
@@ -607,7 +607,7 @@ impl *mut T { /// let tagged_ptr = ptr.map_addr(|a| a | 0b10); /// /// // Get the "tag" back - /// let tag = tagged_ptr.addr() & tag_mask; + /// let tag = tagged_ptr.addr_without_provenance() & tag_mask; /// assert_eq!(tag, 0b10); /// /// // Note that `tagged_ptr` is unaligned, it's UB to read from/write to it. @@ -839,7 +839,7 @@ impl *mut T { /// runtime and may be exploited by optimizations. If you wish to compute the difference between /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - /// origin as isize) / mem::size_of::()`. - // FIXME: recommend `addr()` instead of `as usize` once that is stable. + // FIXME: recommend `addr_without_provenance()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add /// [allocated object]: crate::ptr#allocated-object @@ -1884,7 +1884,7 @@ impl *mut T { #[inline] fn runtime_impl(ptr: *mut (), align: usize) -> bool { - ptr.addr() & (align - 1) == 0 + ptr.addr_without_provenance() & (align - 1) == 0 } #[inline] diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index acb8c552a6338..ff9fc42dcb39c 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -284,17 +284,17 @@ impl NonNull { /// Gets the "address" portion of the pointer. /// - /// For more details see the equivalent method on a raw pointer, [`pointer::addr`]. + /// For more details see the equivalent method on a raw pointer, [`pointer::addr_without_provenance`]. /// /// This API and its claimed semantics are part of the Strict Provenance experiment, /// see the [`ptr` module documentation][crate::ptr]. #[must_use] #[inline] #[unstable(feature = "strict_provenance", issue = "95228")] - pub fn addr(self) -> NonZero { + pub fn addr_without_provenance(self) -> NonZero { // SAFETY: The pointer is guaranteed by the type to be non-null, // meaning that the address will be non-zero. - unsafe { NonZero::new_unchecked(self.pointer.addr()) } + unsafe { NonZero::new_unchecked(self.pointer.addr_without_provenance()) } } /// Creates a new pointer with the given address. @@ -321,7 +321,7 @@ impl NonNull { #[inline] #[unstable(feature = "strict_provenance", issue = "95228")] pub fn map_addr(self, f: impl FnOnce(NonZero) -> NonZero) -> Self { - self.with_addr(f(self.addr())) + self.with_addr(f(self.addr_without_provenance())) } /// Acquires the underlying `*mut` pointer. @@ -803,7 +803,7 @@ impl NonNull { /// runtime and may be exploited by optimizations. If you wish to compute the difference between /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - /// origin as isize) / mem::size_of::()`. - // FIXME: recommend `addr()` instead of `as usize` once that is stable. + // FIXME: recommend `addr_without_provenance()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add /// [allocated object]: crate::ptr#allocated-object @@ -839,10 +839,10 @@ impl NonNull { /// /// let ptr1 = NonNull::new(Box::into_raw(Box::new(0u8))).unwrap(); /// let ptr2 = NonNull::new(Box::into_raw(Box::new(1u8))).unwrap(); - /// let diff = (ptr2.addr().get() as isize).wrapping_sub(ptr1.addr().get() as isize); + /// let diff = (ptr2.addr_without_provenance().get() as isize).wrapping_sub(ptr1.addr_without_provenance().get() as isize); /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1. 
/// let ptr2_other = NonNull::new(ptr1.as_ptr().wrapping_byte_offset(diff)).unwrap(); - /// assert_eq!(ptr2.addr(), ptr2_other.addr()); + /// assert_eq!(ptr2.addr_without_provenance(), ptr2_other.addr_without_provenance()); /// // Since ptr2_other and ptr2 are derived from pointers to different objects, /// // computing their offset is undefined behavior, even though /// // they point to the same address! diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs index 7910981d0f5ee..1ab79072818a1 100644 --- a/library/core/src/slice/iter/macros.rs +++ b/library/core/src/slice/iter/macros.rs @@ -26,7 +26,7 @@ macro_rules! if_zst { #![allow(unused_unsafe)] // we're sometimes used within an unsafe block if T::IS_ZST { - let $len = $this.end_or_len.addr(); + let $len = $this.end_or_len.addr_without_provenance(); $zst_body } else { // SAFETY: for non-ZSTs, the type invariant ensures it cannot be null diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs index 993a608f42b60..95cfb5e0975ba 100644 --- a/library/core/src/slice/sort.rs +++ b/library/core/src/slice/sort.rs @@ -342,7 +342,7 @@ where assert!(mem::size_of::() > 0); // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). - (r.addr() - l.addr()) / mem::size_of::() + (r.addr_without_provenance() - l.addr_without_provenance()) / mem::size_of::() } loop { diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 45193c11e1d6b..90af59009c68f 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -1752,9 +1752,9 @@ impl AtomicPtr { /// use core::sync::atomic::{AtomicPtr, Ordering}; /// /// let atom = AtomicPtr::::new(core::ptr::null_mut()); - /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0); + /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr_without_provenance(), 0); /// // Note: units of `size_of::()`. - /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8); + /// assert_eq!(atom.load(Ordering::Relaxed).addr_without_provenance(), 8); /// ``` #[inline] #[cfg(target_has_atomic = "ptr")] @@ -1832,9 +1832,9 @@ impl AtomicPtr { /// use core::sync::atomic::{AtomicPtr, Ordering}; /// /// let atom = AtomicPtr::::new(core::ptr::null_mut()); - /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0); + /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr_without_provenance(), 0); /// // Note: in units of bytes, not `size_of::()`. - /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1); + /// assert_eq!(atom.load(Ordering::Relaxed).addr_without_provenance(), 1); /// ``` #[inline] #[cfg(target_has_atomic = "ptr")] @@ -1868,8 +1868,8 @@ impl AtomicPtr { /// use core::sync::atomic::{AtomicPtr, Ordering}; /// /// let atom = AtomicPtr::::new(core::ptr::without_provenance_mut(1)); - /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1); - /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0); + /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr_without_provenance(), 1); + /// assert_eq!(atom.load(Ordering::Relaxed).addr_without_provenance(), 0); /// ``` #[inline] #[cfg(target_has_atomic = "ptr")] @@ -1916,10 +1916,10 @@ impl AtomicPtr { /// /// let atom = AtomicPtr::::new(pointer); /// // Tag the bottom bit of the pointer. - /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0); + /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr_without_provenance() & 1, 0); /// // Extract and untag. 
/// let tagged = atom.load(Ordering::Relaxed); - /// assert_eq!(tagged.addr() & 1, 1); + /// assert_eq!(tagged.addr_without_provenance() & 1, 1); /// assert_eq!(tagged.map_addr(|p| p & !1), pointer); /// ``` #[inline] @@ -1966,7 +1966,7 @@ impl AtomicPtr { /// let pointer = &mut 3i64 as *mut i64; /// // A tagged pointer /// let atom = AtomicPtr::::new(pointer.map_addr(|a| a | 1)); - /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1); + /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr_without_provenance() & 1, 1); /// // Untag, and extract the previously tagged pointer. /// let untagged = atom.fetch_and(!1, Ordering::Relaxed) /// .map_addr(|a| a & !1); @@ -2018,7 +2018,7 @@ impl AtomicPtr { /// /// // Toggle a tag bit on the pointer. /// atom.fetch_xor(1, Ordering::Relaxed); - /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1); + /// assert_eq!(atom.load(Ordering::Relaxed).addr_without_provenance() & 1, 1); /// ``` #[inline] #[cfg(target_has_atomic = "ptr")] diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs index 0d1c72a689291..57916cd67cbec 100644 --- a/library/core/tests/atomic.rs +++ b/library/core/tests/atomic.rs @@ -131,17 +131,17 @@ fn int_max() { #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins fn ptr_add_null() { let atom = AtomicPtr::::new(core::ptr::null_mut()); - assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0); - assert_eq!(atom.load(SeqCst).addr(), 8); + assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr_without_provenance(), 0); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 8); - assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8); - assert_eq!(atom.load(SeqCst).addr(), 9); + assert_eq!(atom.fetch_byte_add(1, SeqCst).addr_without_provenance(), 8); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 9); - assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9); - assert_eq!(atom.load(SeqCst).addr(), 1); + assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr_without_provenance(), 9); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 1); - assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1); - assert_eq!(atom.load(SeqCst).addr(), 0); + assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr_without_provenance(), 1); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 0); } #[test] @@ -174,14 +174,14 @@ fn ptr_add_data() { #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins fn ptr_bitops() { let atom = AtomicPtr::::new(core::ptr::null_mut()); - assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0); - assert_eq!(atom.load(SeqCst).addr(), 0b0111); + assert_eq!(atom.fetch_or(0b0111, SeqCst).addr_without_provenance(), 0); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 0b0111); - assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111); - assert_eq!(atom.load(SeqCst).addr(), 0b0101); + assert_eq!(atom.fetch_and(0b1101, SeqCst).addr_without_provenance(), 0b0111); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 0b0101); - assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101); - assert_eq!(atom.load(SeqCst).addr(), 0b1010); + assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr_without_provenance(), 0b0101); + assert_eq!(atom.load(SeqCst).addr_without_provenance(), 0b1010); } #[test] @@ -197,7 +197,7 @@ fn ptr_bitops_tagging() { const MASK_TAG: usize = 0b1111; const MASK_PTR: usize = !MASK_TAG; - assert_eq!(ptr.addr() & MASK_TAG, 0); + assert_eq!(ptr.addr_without_provenance() & MASK_TAG, 0); 
assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr); assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111)); diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs index 0f7fde747690a..3acc06ec72de1 100644 --- a/library/core/tests/mem.rs +++ b/library/core/tests/mem.rs @@ -560,10 +560,22 @@ fn offset_of_addr() { let base = Foo { x: 0, y: 0, z: Bar(0, 0) }; - assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, x), ptr::addr_of!(base.x).addr()); - assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, y), ptr::addr_of!(base.y).addr()); - assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, z.0), ptr::addr_of!(base.z.0).addr()); - assert_eq!(ptr::addr_of!(base).addr() + offset_of!(Foo, z.1), ptr::addr_of!(base.z.1).addr()); + assert_eq!( + ptr::addr_of!(base).addr_without_provenance() + offset_of!(Foo, x), + ptr::addr_of!(base.x).addr_without_provenance() + ); + assert_eq!( + ptr::addr_of!(base).addr_without_provenance() + offset_of!(Foo, y), + ptr::addr_of!(base.y).addr_without_provenance() + ); + assert_eq!( + ptr::addr_of!(base).addr_without_provenance() + offset_of!(Foo, z.0), + ptr::addr_of!(base.z.0).addr_without_provenance() + ); + assert_eq!( + ptr::addr_of!(base).addr_without_provenance() + offset_of!(Foo, z.1), + ptr::addr_of!(base.z.1).addr_without_provenance() + ); } #[test] diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs index 659fbd255c168..258c428a4be8b 100644 --- a/library/core/tests/ptr.rs +++ b/library/core/tests/ptr.rs @@ -1056,7 +1056,7 @@ fn nonnull_tagged_pointer_with_provenance() { /// Consume this tagged pointer and produce the data it carries. pub fn tag(&self) -> usize { - self.0.addr().get() & Self::DATA_MASK + self.0.addr_without_provenance().get() & Self::DATA_MASK } /// Update the data this tagged pointer carries to a new value. diff --git a/library/proc_macro/src/bridge/arena.rs b/library/proc_macro/src/bridge/arena.rs index f81f2152cd046..c5bf2c4730671 100644 --- a/library/proc_macro/src/bridge/arena.rs +++ b/library/proc_macro/src/bridge/arena.rs @@ -72,9 +72,9 @@ impl Arena { /// chunk. Returns `None` if there is no free space left to satisfy the /// request. fn alloc_raw_without_grow(&self, bytes: usize) -> Option<&mut [MaybeUninit]> { - let start = self.start.get().addr(); + let start = self.start.get().addr_without_provenance(); let old_end = self.end.get(); - let end = old_end.addr(); + let end = old_end.addr_without_provenance(); let new_end = end.checked_sub(bytes)?; if start <= new_end { diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs index 475b3e7eb9312..51e018938e2c4 100644 --- a/library/std/src/backtrace.rs +++ b/library/std/src/backtrace.rs @@ -333,7 +333,8 @@ impl Backtrace { frame: RawFrame::Actual(frame.clone()), symbols: Vec::new(), }); - if frame.symbol_address().addr() == ip && actual_start.is_none() { + if frame.symbol_address().addr_without_provenance() == ip && actual_start.is_none() + { actual_start = Some(frames.len()); } true diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs index 6f8d5e3777568..f69e39632d147 100644 --- a/library/std/src/io/error/repr_bitpacked.rs +++ b/library/std/src/io/error/repr_bitpacked.rs @@ -144,7 +144,7 @@ impl Repr { let p = Box::into_raw(b).cast::(); // Should only be possible if an allocator handed out a pointer with // wrong alignment. 
- debug_assert_eq!(p.addr() & TAG_MASK, 0); + debug_assert_eq!(p.addr_without_provenance() & TAG_MASK, 0); // Note: We know `TAG_CUSTOM <= size_of::()` (static_assert at // end of file), and both the start and end of the expression must be // valid without address space wraparound due to `Box`'s semantics. @@ -252,7 +252,7 @@ unsafe fn decode_repr(ptr: NonNull<()>, make_custom: F) -> ErrorData where F: FnOnce(*mut Custom) -> C, { - let bits = ptr.as_ptr().addr(); + let bits = ptr.as_ptr().addr_without_provenance(); match bits & TAG_MASK { TAG_OS => { let code = ((bits as i64) >> 32) as RawOsError; diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs index 9757653e02c06..650be9c522102 100644 --- a/library/std/src/os/unix/net/addr.rs +++ b/library/std/src/os/unix/net/addr.rs @@ -20,8 +20,8 @@ mod libc { fn sun_path_offset(addr: &libc::sockaddr_un) -> usize { // Work with an actual instance of the type since using a null pointer is UB - let base = (addr as *const libc::sockaddr_un).addr(); - let path = core::ptr::addr_of!(addr.sun_path).addr(); + let base = (addr as *const libc::sockaddr_un).addr_without_provenance(); + let path = core::ptr::addr_of!(addr.sun_path).addr_without_provenance(); path - base } diff --git a/library/std/src/path.rs b/library/std/src/path.rs index 89fbd5c4c6454..08cf32176ec0b 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -1482,8 +1482,8 @@ impl PathBuf { }; // truncate until right after the file stem - let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr(); - let start = self.inner.as_encoded_bytes().as_ptr().addr(); + let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr_without_provenance(); + let start = self.inner.as_encoded_bytes().as_ptr().addr_without_provenance(); let v = self.as_mut_vec(); v.truncate(end_file_stem.wrapping_sub(start)); diff --git a/library/std/src/sync/mpmc/waker.rs b/library/std/src/sync/mpmc/waker.rs index 9aab1b9417edb..38412601d7604 100644 --- a/library/std/src/sync/mpmc/waker.rs +++ b/library/std/src/sync/mpmc/waker.rs @@ -206,5 +206,5 @@ pub fn current_thread_id() -> usize { // `u8` is not drop so this variable will be available during thread destruction, // whereas `thread::current()` would not be thread_local! { static DUMMY: u8 = 0 } - DUMMY.with(|x| (x as *const u8).addr()) + DUMMY.with(|x| (x as *const u8).addr_without_provenance()) } diff --git a/library/std/src/sync/remutex.rs b/library/std/src/sync/remutex.rs index 0ced48d10b7c6..e575de5e42dbc 100644 --- a/library/std/src/sync/remutex.rs +++ b/library/std/src/sync/remutex.rs @@ -174,5 +174,5 @@ impl Drop for ReentrantMutexGuard<'_, T> { pub fn current_thread_unique_ptr() -> usize { // Use a non-drop type to make sure it's still available during thread destruction. thread_local! { static X: u8 = const { 0 } } - X.with(|x| <*const _>::addr(x)) + X.with(|x| <*const _>::addr_without_provenance(x)) } diff --git a/library/std/src/sys/locks/condvar/xous.rs b/library/std/src/sys/locks/condvar/xous.rs index 0e51449e0afa4..748ca4aba3494 100644 --- a/library/std/src/sys/locks/condvar/xous.rs +++ b/library/std/src/sys/locks/condvar/xous.rs @@ -85,7 +85,7 @@ impl Condvar { } fn index(&self) -> usize { - core::ptr::from_ref(self).addr() + core::ptr::from_ref(self).addr_without_provenance() } /// Unlock the given Mutex and wait for the notification. 
Wait at most diff --git a/library/std/src/sys/locks/mutex/xous.rs b/library/std/src/sys/locks/mutex/xous.rs index a8c9518ff0bcf..a491788b5dce4 100644 --- a/library/std/src/sys/locks/mutex/xous.rs +++ b/library/std/src/sys/locks/mutex/xous.rs @@ -29,7 +29,7 @@ impl Mutex { } fn index(&self) -> usize { - core::ptr::from_ref(self).addr() + core::ptr::from_ref(self).addr_without_provenance() } #[inline] diff --git a/library/std/src/sys/locks/rwlock/queue.rs b/library/std/src/sys/locks/rwlock/queue.rs index dce966086b8ff..2a40ef91427d6 100644 --- a/library/std/src/sys/locks/rwlock/queue.rs +++ b/library/std/src/sys/locks/rwlock/queue.rs @@ -137,14 +137,14 @@ const MASK: usize = !(QUEUE_LOCKED | QUEUED | LOCKED); #[inline] fn write_lock(state: State) -> Option { let state = state.wrapping_byte_add(LOCKED); - if state.addr() & LOCKED == LOCKED { Some(state) } else { None } + if state.addr_without_provenance() & LOCKED == LOCKED { Some(state) } else { None } } /// Marks the state as read-locked, if possible. #[inline] fn read_lock(state: State) -> Option { - if state.addr() & QUEUED == 0 && state.addr() != LOCKED { - Some(without_provenance_mut(state.addr().checked_add(SINGLE)? | LOCKED)) + if state.addr_without_provenance() & QUEUED == 0 && state.addr_without_provenance() != LOCKED { + Some(without_provenance_mut(state.addr_without_provenance().checked_add(SINGLE)? | LOCKED)) } else { None } @@ -303,7 +303,7 @@ impl RwLock { // "ldseta" on modern AArch64), and therefore is more efficient than // `fetch_update(lock(true))`, which can spuriously fail if a new node // is appended to the queue. - self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0 + self.state.fetch_or(LOCKED, Acquire).addr_without_provenance() & LOCKED == 0 } #[inline] @@ -326,7 +326,7 @@ impl RwLock { Ok(_) => return, Err(new) => state = new, } - } else if state.addr() & QUEUED == 0 && count < SPIN_COUNT { + } else if state.addr_without_provenance() & QUEUED == 0 && count < SPIN_COUNT { // If the lock is not available and no threads are queued, spin // for a while, using exponential backoff to decrease cache // contention. @@ -346,10 +346,10 @@ impl RwLock { node.next.0 = AtomicPtr::new(state.mask(MASK).cast()); node.prev = AtomicLink::new(None); let mut next = ptr::from_ref(&node) - .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED)) + .map_addr(|addr| addr | QUEUED | (state.addr_without_provenance() & LOCKED)) as State; - if state.addr() & QUEUED == 0 { + if state.addr_without_provenance() & QUEUED == 0 { // If this is the first node in the queue, set the tail field to // the node itself to ensure there is a current `tail` field in // the queue (invariants 1 and 2). This needs to use `set` to @@ -378,7 +378,7 @@ impl RwLock { // If the current thread locked the queue, unlock it again, // linking it in the process. - if state.addr() & (QUEUE_LOCKED | QUEUED) == QUEUED { + if state.addr_without_provenance() & (QUEUE_LOCKED | QUEUED) == QUEUED { unsafe { self.unlock_queue(next); } @@ -403,8 +403,8 @@ impl RwLock { #[inline] pub unsafe fn read_unlock(&self) { match self.state.fetch_update(Release, Acquire, |state| { - if state.addr() & QUEUED == 0 { - let count = state.addr() - (SINGLE | LOCKED); + if state.addr_without_provenance() & QUEUED == 0 { + let count = state.addr_without_provenance() - (SINGLE | LOCKED); Some(if count > 0 { without_provenance_mut(count | LOCKED) } else { UNLOCKED }) } else { None @@ -431,7 +431,8 @@ impl RwLock { // The lock count is stored in the `next` field of `tail`. 
// Decrement it, making sure to observe all changes made to the queue // by the other lock owners by using acquire-release ordering. - let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0; + let was_last = + tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr_without_provenance() - SINGLE == 0; if was_last { // SAFETY: // Other threads cannot read-lock while threads are queued. Also, @@ -464,7 +465,7 @@ impl RwLock { match self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) { // The queue lock was acquired. Release it, waking up the next // waiter in the process. - Ok(_) if state.addr() & QUEUE_LOCKED == 0 => unsafe { + Ok(_) if state.addr_without_provenance() & QUEUE_LOCKED == 0 => unsafe { return self.unlock_queue(next); }, // Another thread already holds the queue lock, leave waking up @@ -481,12 +482,15 @@ impl RwLock { /// # Safety /// The queue lock must be held by the current thread. unsafe fn unlock_queue(&self, mut state: State) { - debug_assert_eq!(state.addr() & (QUEUED | QUEUE_LOCKED), QUEUED | QUEUE_LOCKED); + debug_assert_eq!( + state.addr_without_provenance() & (QUEUED | QUEUE_LOCKED), + QUEUED | QUEUE_LOCKED + ); loop { let tail = unsafe { add_backlinks_and_find_tail(to_node(state)) }; - if state.addr() & LOCKED == LOCKED { + if state.addr_without_provenance() & LOCKED == LOCKED { // Another thread has locked the lock. Leave waking up waiters // to them by releasing the queue lock. match self.state.compare_exchange_weak( diff --git a/library/std/src/sys/pal/common/thread_local/os_local.rs b/library/std/src/sys/pal/common/thread_local/os_local.rs index 3edffd7e4437c..4681ff0cb3cfe 100644 --- a/library/std/src/sys/pal/common/thread_local/os_local.rs +++ b/library/std/src/sys/pal/common/thread_local/os_local.rs @@ -115,7 +115,7 @@ impl Key { pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> { // SAFETY: See the documentation for this method. let ptr = unsafe { self.os.get() as *mut Value }; - if ptr.addr() > 1 { + if ptr.addr_without_provenance() > 1 { // SAFETY: the check ensured the pointer is safe (its destructor // is not running) + it is coming from a trusted source (self). if let Some(ref value) = unsafe { (*ptr).inner.get() } { @@ -134,7 +134,7 @@ impl Key { // SAFETY: No mutable references are ever handed out meaning getting // the value is ok. 
let ptr = unsafe { self.os.get() as *mut Value }; - if ptr.addr() == 1 { + if ptr.addr_without_provenance() == 1 { // destructor is running return None; } diff --git a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs index f99cea360f1f4..7a819cf51eb52 100644 --- a/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs +++ b/library/std/src/sys/pal/sgx/abi/usercalls/alloc.rs @@ -406,8 +406,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) assert!(is_enclave_range(src, len)); assert!(is_user_range(dst, len)); assert!(len < isize::MAX as usize); - assert!(!src.addr().overflowing_add(len).1); - assert!(!dst.addr().overflowing_add(len).1); + assert!(!src.addr_without_provenance().overflowing_add(len).1); + assert!(!dst.addr_without_provenance().overflowing_add(len).1); unsafe { let (len1, len2, len3) = u64_align_to_guaranteed(dst, len); diff --git a/library/std/src/sys/pal/unix/memchr.rs b/library/std/src/sys/pal/unix/memchr.rs index 73ba604eccba2..913928facf044 100644 --- a/library/std/src/sys/pal/unix/memchr.rs +++ b/library/std/src/sys/pal/unix/memchr.rs @@ -9,7 +9,11 @@ pub fn memchr(needle: u8, haystack: &[u8]) -> Option { haystack.len(), ) }; - if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) } + if p.is_null() { + None + } else { + Some(p.addr_without_provenance() - haystack.as_ptr().addr_without_provenance()) + } } pub fn memrchr(needle: u8, haystack: &[u8]) -> Option { @@ -28,7 +32,11 @@ pub fn memrchr(needle: u8, haystack: &[u8]) -> Option { }; // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). - if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) } + if p.is_null() { + None + } else { + Some(p.addr_without_provenance() - haystack.as_ptr().addr_without_provenance()) + } } #[cfg(not(target_os = "linux"))] diff --git a/library/std/src/sys/pal/unix/thread.rs b/library/std/src/sys/pal/unix/thread.rs index 864de31c6ebfc..1b5c268bea98b 100644 --- a/library/std/src/sys/pal/unix/thread.rs +++ b/library/std/src/sys/pal/unix/thread.rs @@ -757,10 +757,11 @@ pub mod guard { let stack_ptr = current_stack.ss_sp; let stackaddr = if libc::pthread_main_np() == 1 { // main thread - stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed) + stack_ptr.addr_without_provenance() - current_stack.ss_size + + PAGE_SIZE.load(Ordering::Relaxed) } else { // new thread - stack_ptr.addr() - current_stack.ss_size + stack_ptr.addr_without_provenance() - current_stack.ss_size }; Some(stack_ptr.with_addr(stackaddr)) } @@ -799,7 +800,7 @@ pub mod guard { let page_size = PAGE_SIZE.load(Ordering::Relaxed); assert!(page_size != 0); let stackptr = get_stack_start()?; - let stackaddr = stackptr.addr(); + let stackaddr = stackptr.addr_without_provenance(); // Ensure stackaddr is page aligned! A parent process might // have reset RLIMIT_STACK to be non-page aligned. The @@ -831,7 +832,7 @@ pub mod guard { // faulting, so our handler can report "stack overflow", and // trust that the kernel's own stack guard will work. let stackptr = get_stack_start_aligned()?; - let stackaddr = stackptr.addr(); + let stackaddr = stackptr.addr_without_provenance(); Some(stackaddr - page_size..stackaddr) } else if cfg!(all(target_os = "linux", target_env = "musl")) { // For the main thread, the musl's pthread_attr_getstack @@ -845,7 +846,7 @@ pub mod guard { // ourselves, FreeBSD's guard page moves upwards. 
So we'll just use // the builtin guard page. let stackptr = get_stack_start_aligned()?; - let guardaddr = stackptr.addr(); + let guardaddr = stackptr.addr_without_provenance(); // Technically the number of guard pages is tunable and controlled // by the security.bsd.stack_guard_page sysctl. // By default it is 1, checking once is enough since it is @@ -881,7 +882,7 @@ pub mod guard { // faulting, so our handler can report "stack overflow", and // trust that the kernel's own stack guard will work. let stackptr = get_stack_start_aligned()?; - let stackaddr = stackptr.addr(); + let stackaddr = stackptr.addr_without_provenance(); Some(stackaddr - page_size..stackaddr) } else { // Reallocate the last page of the stack. @@ -910,7 +911,7 @@ pub mod guard { panic!("failed to protect the guard page: {}", io::Error::last_os_error()); } - let guardaddr = stackptr.addr(); + let guardaddr = stackptr.addr_without_provenance(); Some(guardaddr..guardaddr + page_size) } @@ -919,7 +920,7 @@ pub mod guard { #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))] pub unsafe fn current() -> Option { let stackptr = get_stack_start()?; - let stackaddr = stackptr.addr(); + let stackaddr = stackptr.addr_without_provenance(); Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr) } @@ -957,7 +958,7 @@ pub mod guard { let mut size = 0; assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0); - let stackaddr = stackptr.addr(); + let stackaddr = stackptr.addr_without_provenance(); ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) { Some(stackaddr - guardsize..stackaddr) } else if cfg!(all(target_os = "linux", target_env = "musl")) { diff --git a/library/std/src/sys/pal/unix/weak.rs b/library/std/src/sys/pal/unix/weak.rs index 48cc8633e93d2..f4a2c8a43a890 100644 --- a/library/std/src/sys/pal/unix/weak.rs +++ b/library/std/src/sys/pal/unix/weak.rs @@ -93,7 +93,7 @@ impl DlsymWeak { // Relaxed is fine here because we fence before reading through the // pointer (see the comment below). match self.func.load(Ordering::Relaxed) { - func if func.addr() == 1 => self.initialize(), + func if func.addr_without_provenance() == 1 => self.initialize(), func if func.is_null() => None, func => { let func = mem::transmute_copy::<*mut libc::c_void, F>(&func); diff --git a/library/std/src/sys/pal/windows/alloc.rs b/library/std/src/sys/pal/windows/alloc.rs index 270eca37b14d6..670f63f4f099e 100644 --- a/library/std/src/sys/pal/windows/alloc.rs +++ b/library/std/src/sys/pal/windows/alloc.rs @@ -162,7 +162,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 { // Create a correctly aligned pointer offset from the start of the allocated block, // and write a header before it. - let offset = layout.align() - (ptr.addr() & (layout.align() - 1)); + let offset = layout.align() - (ptr.addr_without_provenance() & (layout.align() - 1)); // SAFETY: `MIN_ALIGN` <= `offset` <= `layout.align()` and the size of the allocated // block is `layout.align() + layout.size()`. 
`aligned` will thus be a correctly aligned // pointer inside the allocated block with at least `layout.size()` bytes after it and at diff --git a/library/std/src/sys/pal/windows/mod.rs b/library/std/src/sys/pal/windows/mod.rs index b47d213df343a..3341b0b60669f 100644 --- a/library/std/src/sys/pal/windows/mod.rs +++ b/library/std/src/sys/pal/windows/mod.rs @@ -154,7 +154,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option { ($($n:literal,)+) => { $( if start[$n] == needle { - return Some(((&start[$n] as *const u16).addr() - ptr.addr()) / 2); + return Some(((&start[$n] as *const u16).addr_without_provenance() - ptr.addr_without_provenance()) / 2); } )+ } @@ -167,7 +167,9 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option { for c in start { if *c == needle { - return Some(((c as *const u16).addr() - ptr.addr()) / 2); + return Some( + ((c as *const u16).addr_without_provenance() - ptr.addr_without_provenance()) / 2, + ); } } None diff --git a/library/std/src/sys/pal/xous/thread_parking.rs b/library/std/src/sys/pal/xous/thread_parking.rs index 0bd0462d77d35..c233982fbe198 100644 --- a/library/std/src/sys/pal/xous/thread_parking.rs +++ b/library/std/src/sys/pal/xous/thread_parking.rs @@ -22,7 +22,7 @@ impl Parker { } fn index(&self) -> usize { - ptr::from_ref(self).addr() + ptr::from_ref(self).addr_without_provenance() } pub unsafe fn park(self: Pin<&Self>) { diff --git a/library/std/src/sys/personality/dwarf/eh.rs b/library/std/src/sys/personality/dwarf/eh.rs index a78084de0faef..96070632efb68 100644 --- a/library/std/src/sys/personality/dwarf/eh.rs +++ b/library/std/src/sys/personality/dwarf/eh.rs @@ -111,12 +111,12 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result // SjLj version: // The "IP" is an index into the call-site table, with two exceptions: // -1 means 'no-action', and 0 means 'terminate'. - match ip.addr() as isize { + match ip.addr_without_provenance() as isize { -1 => return Ok(EHAction::None), 0 => return Ok(EHAction::Terminate), _ => (), } - let mut idx = ip.addr(); + let mut idx = ip.addr_without_provenance(); loop { let cs_lpad = reader.read_uleb128(); let cs_action_entry = reader.read_uleb128(); @@ -230,8 +230,10 @@ unsafe fn read_encoded_pointer( DW_EH_PE_datarel => (*context.get_data_start)(), // aligned means the value is aligned to the size of a pointer DW_EH_PE_aligned => { - reader.ptr = - reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<*const u8>())?); + reader.ptr = reader.ptr.with_addr(round_up( + reader.ptr.addr_without_provenance(), + mem::size_of::<*const u8>(), + )?); core::ptr::null() } _ => return Err(()), diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs index 730cdb768bd27..94964e1ead6a1 100644 --- a/library/std/src/sys_common/once/queue.rs +++ b/library/std/src/sys_common/once/queue.rs @@ -119,12 +119,12 @@ impl Once { // operations visible to us, and, this being a fast path, weaker // ordering helps with performance. This `Acquire` synchronizes with // `Release` operations on the slow path. 
- self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE + self.state_and_queue.load(Ordering::Acquire).addr_without_provenance() == COMPLETE } #[inline] pub(crate) fn state(&mut self) -> ExclusiveState { - match self.state_and_queue.get_mut().addr() { + match self.state_and_queue.get_mut().addr_without_provenance() { INCOMPLETE => ExclusiveState::Incomplete, POISONED => ExclusiveState::Poisoned, COMPLETE => ExclusiveState::Complete, @@ -148,7 +148,7 @@ impl Once { pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) { let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); loop { - match state_and_queue.addr() { + match state_and_queue.addr_without_provenance() { COMPLETE => break, POISONED if !ignore_poisoning => { // Panic to propagate the poison. @@ -176,7 +176,7 @@ impl Once { // poisoned or not. let init_state = public::OnceState { inner: OnceState { - poisoned: state_and_queue.addr() == POISONED, + poisoned: state_and_queue.addr_without_provenance() == POISONED, set_state_on_drop_to: Cell::new(ptr::without_provenance_mut(COMPLETE)), }, }; @@ -187,7 +187,7 @@ impl Once { _ => { // All other values must be RUNNING with possibly a // pointer to the waiter queue in the more significant bits. - assert!(state_and_queue.addr() & STATE_MASK == RUNNING); + assert!(state_and_queue.addr_without_provenance() & STATE_MASK == RUNNING); wait(&self.state_and_queue, state_and_queue); state_and_queue = self.state_and_queue.load(Ordering::Acquire); } @@ -202,7 +202,7 @@ fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { loop { // Don't queue this thread if the status is no longer running, // otherwise we will not be woken up. - if current_state.addr() & STATE_MASK != RUNNING { + if current_state.addr_without_provenance() & STATE_MASK != RUNNING { return; } @@ -210,7 +210,8 @@ fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { let node = Waiter { thread: Cell::new(Some(thread::current())), signaled: AtomicBool::new(false), - next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, + next: current_state.with_addr(current_state.addr_without_provenance() & !STATE_MASK) + as *const Waiter, }; let me = core::ptr::addr_of!(node) as *const Masked as *mut Masked; @@ -218,7 +219,7 @@ fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { // that another thread didn't just replace the head of the linked list. let exchange_result = state_and_queue.compare_exchange( current_state, - me.with_addr(me.addr() | RUNNING), + me.with_addr(me.addr_without_provenance() | RUNNING), Ordering::Release, Ordering::Relaxed, ); @@ -257,7 +258,7 @@ impl Drop for WaiterQueue<'_> { self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); // We should only ever see an old state which was RUNNING. - assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); + assert_eq!(state_and_queue.addr_without_provenance() & STATE_MASK, RUNNING); // Walk the entire linked list of waiters and wake them up (in lifo // order, last to register is first to wake up). @@ -266,8 +267,9 @@ impl Drop for WaiterQueue<'_> { // free `node` if there happens to be has a spurious wakeup. // So we have to take out the `thread` field and copy the pointer to // `next` first. 
- let mut queue = - state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; + let mut queue = state_and_queue + .with_addr(state_and_queue.addr_without_provenance() & !STATE_MASK) + as *const Waiter; while !queue.is_null() { let next = (*queue).next; let thread = (*queue).thread.take().unwrap(); diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs index 0466743966034..62e021feafc04 100644 --- a/library/std/src/sys_common/thread_parking/id.rs +++ b/library/std/src/sys_common/thread_parking/id.rs @@ -62,7 +62,7 @@ impl Parker { // The state must be reset with acquire ordering to ensure that all // calls to `unpark` synchronize with this thread. while self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_err() { - park(self.state.as_ptr().addr()); + park(self.state.as_ptr().addr_without_provenance()); } } } @@ -72,7 +72,7 @@ impl Parker { let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1); if state == PARKED { - park_timeout(dur, self.state.as_ptr().addr()); + park_timeout(dur, self.state.as_ptr().addr_without_provenance()); // Swap to ensure that we observe all state changes with acquire // ordering. self.state.swap(EMPTY, Acquire); @@ -94,7 +94,7 @@ impl Parker { // and terminated before this call is made. This call then returns an // error or wakes up an unrelated thread. The platform API and // environment does allow this, however. - unpark(tid, self.state.as_ptr().addr()); + unpark(tid, self.state.as_ptr().addr_without_provenance()); } } } diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs index 527c408c89edd..c003c32bd5083 100644 --- a/library/unwind/src/libunwind.rs +++ b/library/unwind/src/libunwind.rs @@ -238,7 +238,7 @@ if #[cfg(any(target_os = "ios", target_os = "tvos", target_os = "watchos", targe pub unsafe fn _Unwind_SetIP(ctx: *mut _Unwind_Context, value: _Unwind_Word) { // Propagate thumb bit to instruction pointer - let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG).addr() & 1; + let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG).addr_without_provenance() & 1; let value = value.map_addr(|v| v | thumb_state); _Unwind_SetGR(ctx, UNWIND_IP_REG, value); } diff --git a/src/tools/miri/tests/fail/provenance/provenance_transmute.rs b/src/tools/miri/tests/fail/provenance/provenance_transmute.rs index bc5dd53dcf5e4..b5d790dcb56ea 100644 --- a/src/tools/miri/tests/fail/provenance/provenance_transmute.rs +++ b/src/tools/miri/tests/fail/provenance/provenance_transmute.rs @@ -22,6 +22,6 @@ fn main() { let ptr2 = &1u8 as *const u8; unsafe { // Two pointers with the same address but different provenance. 
- deref(ptr1, ptr2.with_addr(ptr1.addr())); + deref(ptr1, ptr2.with_addr(ptr1.addr_without_provenance())); } } diff --git a/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr b/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr index 6b1c2941c075c..e0446eae4cba7 100644 --- a/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr +++ b/src/tools/miri/tests/fail/provenance/provenance_transmute.stderr @@ -11,8 +11,8 @@ LL | let _val = *left_ptr; note: inside `main` --> $DIR/provenance_transmute.rs:LL:CC | -LL | deref(ptr1, ptr2.with_addr(ptr1.addr())); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | deref(ptr1, ptr2.with_addr(ptr1.addr_without_provenance())); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace diff --git a/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs b/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs index 20fd330699890..50127e7c673ea 100644 --- a/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs +++ b/src/tools/miri/tests/fail/provenance/ptr_int_unexposed.rs @@ -5,7 +5,7 @@ fn main() { let x: i32 = 3; let x_ptr = &x as *const i32; - let x_usize: usize = x_ptr.addr(); + let x_usize: usize = x_ptr.addr_without_provenance(); // Cast back an address that did *not* get exposed. let ptr = std::ptr::from_exposed_addr::(x_usize); assert_eq!(unsafe { *ptr }, 3); //~ ERROR: is a dangling pointer diff --git a/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs b/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs index e075db66039bc..fbc41db39ef1a 100644 --- a/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs +++ b/src/tools/miri/tests/fail/unaligned_pointers/promise_alignment.rs @@ -17,8 +17,8 @@ fn main() { let _val = unsafe { buffer.read() }; // Let's find a place to promise alignment 8. - let align8 = if buffer.addr() % 8 == 0 { buffer } else { buffer.wrapping_add(1) }; - assert!(align8.addr() % 8 == 0); + let align8 = if buffer.addr_without_provenance() % 8 == 0 { buffer } else { buffer.wrapping_add(1) }; + assert!(align8.addr_without_provenance() % 8 == 0); unsafe { utils::miri_promise_symbolic_alignment(align8.cast(), 8) }; // Promising the alignment down to 1 *again* still must not hurt. unsafe { utils::miri_promise_symbolic_alignment(buffer.cast(), 1) }; @@ -37,8 +37,8 @@ fn main() { #[derive(Copy, Clone)] struct Align16(#[allow(dead_code)] u128); - let align16 = if align8.addr() % 16 == 0 { align8 } else { align8.wrapping_add(2) }; - assert!(align16.addr() % 16 == 0); + let align16 = if align8.addr_without_provenance() % 16 == 0 { align8 } else { align8.wrapping_add(2) }; + assert!(align16.addr_without_provenance() % 16 == 0); let _val = unsafe { align8.cast::().read() }; //~[read_unaligned_ptr]^ ERROR: accessing memory based on pointer with alignment 8, but alignment 16 is required diff --git a/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs b/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs index 5cf62995fbee2..e763d08ce5cfd 100644 --- a/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs +++ b/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs @@ -65,7 +65,7 @@ fn main() { // The pointer is not modified on failure, posix_memalign(3) says: // > On Linux (and other systems), posix_memalign() does not modify memptr on failure. // > A requirement standardizing this behavior was added in POSIX.1-2008 TC2. 
diff --git a/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs b/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs
index 5cf62995fbee2..e763d08ce5cfd 100644
--- a/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs
+++ b/src/tools/miri/tests/pass-dep/shims/posix_memalign.rs
@@ -65,7 +65,7 @@ fn main() {
         // The pointer is not modified on failure, posix_memalign(3) says:
         // > On Linux (and other systems), posix_memalign() does not modify memptr on failure.
         // > A requirement standardizing this behavior was added in POSIX.1-2008 TC2.
-        assert_eq!(ptr.addr(), 0x1234567);
+        assert_eq!(ptr.addr_without_provenance(), 0x1234567);
     }
 
     // Too small align (smaller than ptr)
@@ -77,6 +77,6 @@ fn main() {
         // The pointer is not modified on failure, posix_memalign(3) says:
        // > On Linux (and other systems), posix_memalign() does not modify memptr on failure.
         // > A requirement standardizing this behavior was added in POSIX.1-2008 TC2.
-        assert_eq!(ptr.addr(), 0x1234567);
+        assert_eq!(ptr.addr_without_provenance(), 0x1234567);
     }
 }
diff --git a/src/tools/miri/tests/pass/atomic.rs b/src/tools/miri/tests/pass/atomic.rs
index dfdc9b42f81fc..7f29053f07d07 100644
--- a/src/tools/miri/tests/pass/atomic.rs
+++ b/src/tools/miri/tests/pass/atomic.rs
@@ -136,45 +136,45 @@ fn atomic_ptr() {
     let x = array.as_ptr() as *mut i32;
 
     let ptr = AtomicPtr::<i32>::new(ptr::null_mut());
-    assert!(ptr.load(Relaxed).addr() == 0);
+    assert!(ptr.load(Relaxed).addr_without_provenance() == 0);
     ptr.store(ptr::without_provenance_mut(13), SeqCst);
-    assert!(ptr.swap(x, Relaxed).addr() == 13);
+    assert!(ptr.swap(x, Relaxed).addr_without_provenance() == 13);
     unsafe { assert!(*ptr.load(Acquire) == 0) };
 
     // comparison ignores provenance
     assert_eq!(
         ptr.compare_exchange(
-            (&mut 0 as *mut i32).with_addr(x.addr()),
+            (&mut 0 as *mut i32).with_addr(x.addr_without_provenance()),
             ptr::without_provenance_mut(0),
             SeqCst,
             SeqCst
         )
         .unwrap()
-        .addr(),
-        x.addr(),
+        .addr_without_provenance(),
+        x.addr_without_provenance(),
     );
     assert_eq!(
         ptr.compare_exchange(
-            (&mut 0 as *mut i32).with_addr(x.addr()),
+            (&mut 0 as *mut i32).with_addr(x.addr_without_provenance()),
             ptr::without_provenance_mut(0),
             SeqCst,
             SeqCst
        )
         .unwrap_err()
-        .addr(),
+        .addr_without_provenance(),
         0,
     );
 
     ptr.store(x, Relaxed);
-    assert_eq!(ptr.fetch_ptr_add(13, AcqRel).addr(), x.addr());
+    assert_eq!(ptr.fetch_ptr_add(13, AcqRel).addr_without_provenance(), x.addr_without_provenance());
     unsafe { assert_eq!(*ptr.load(SeqCst), 13) }; // points to index 13 now
-    assert_eq!(ptr.fetch_ptr_sub(4, AcqRel).addr(), x.addr() + 13 * 4);
+    assert_eq!(ptr.fetch_ptr_sub(4, AcqRel).addr_without_provenance(), x.addr_without_provenance() + 13 * 4);
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
-    assert_eq!(ptr.fetch_or(3, AcqRel).addr(), x.addr() + 9 * 4); // ptr is 4-aligned, so set the last 2 bits
-    assert_eq!(ptr.fetch_and(!3, AcqRel).addr(), (x.addr() + 9 * 4) | 3); // and unset them again
+    assert_eq!(ptr.fetch_or(3, AcqRel).addr_without_provenance(), x.addr_without_provenance() + 9 * 4); // ptr is 4-aligned, so set the last 2 bits
+    assert_eq!(ptr.fetch_and(!3, AcqRel).addr_without_provenance(), (x.addr_without_provenance() + 9 * 4) | 3); // and unset them again
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) };
-    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr(), x.addr() + 9 * 4);
-    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr(), (x.addr() + 9 * 4) ^ 0xdeadbeef);
+    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr_without_provenance(), x.addr_without_provenance() + 9 * 4);
+    assert_eq!(ptr.fetch_xor(0xdeadbeef, AcqRel).addr_without_provenance(), (x.addr_without_provenance() + 9 * 4) ^ 0xdeadbeef);
     unsafe { assert_eq!(*ptr.load(SeqCst), 9) }; // after XORing twice with the same thing, we get our ptr back
 }
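The `fetch_or`/`fetch_and` pairs in the atomic test read more clearly as a tiny tagging protocol: the bit operations act on the address while the stored pointer keeps its provenance. A hedged sketch of the same pattern — the `AtomicPtr` bit ops are the unstable `strict_provenance_atomic_ptr` feature at this point in time, and `addr_without_provenance` is this PR's spelling of `addr`:

    #![feature(strict_provenance, strict_provenance_atomic_ptr)]
    use std::sync::atomic::{AtomicPtr, Ordering::AcqRel, Ordering::SeqCst};

    fn main() {
        let mut slot = 7u32; // 4-aligned, so the two low address bits are free
        let ptr = AtomicPtr::new(&mut slot as *mut u32);

        // Set a tag in the alignment bits; provenance is untouched.
        let old = ptr.fetch_or(0b11, AcqRel);
        assert_eq!(old.addr_without_provenance() & 0b11, 0);

        // Clear the tag again; the pointer is dereferenceable as before.
        ptr.fetch_and(!0b11, AcqRel);
        unsafe { assert_eq!(*ptr.load(SeqCst), 7) };
    }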
diff --git a/src/tools/miri/tests/pass/const-addrs.rs b/src/tools/miri/tests/pass/const-addrs.rs
index 6c14f0b679ce8..f803d81c81037 100644
--- a/src/tools/miri/tests/pass/const-addrs.rs
+++ b/src/tools/miri/tests/pass/const-addrs.rs
@@ -25,7 +25,7 @@ fn main() {
     // Check that within a call we always produce the same address
     let mut prev = 0;
     for iter in 0..EVALS {
-        let addr = "test".as_bytes().as_ptr().addr();
+        let addr = "test".as_bytes().as_ptr().addr_without_provenance();
         if iter > 0 {
             assert_eq!(prev, addr);
         }
@@ -34,5 +34,5 @@ fn main() {
 }
 
 fn const_addr() -> usize {
-    "test".as_bytes().as_ptr().addr()
+    "test".as_bytes().as_ptr().addr_without_provenance()
 }
diff --git a/src/tools/miri/tests/pass/ptr_int_from_exposed.rs b/src/tools/miri/tests/pass/ptr_int_from_exposed.rs
index d8d57679e6b36..2e2eb8d0c5273 100644
--- a/src/tools/miri/tests/pass/ptr_int_from_exposed.rs
+++ b/src/tools/miri/tests/pass/ptr_int_from_exposed.rs
@@ -52,7 +52,7 @@ fn ptr_roundtrip_null() {
     assert_eq!(null, 0);
 
     let x_null_ptr_copy = ptr::from_exposed_addr::<i32>(null); // just a roundtrip, so has provenance of x (angelically)
-    let x_ptr_copy = x_null_ptr_copy.with_addr(x_ptr.addr()); // addr of x and provenance of x
+    let x_ptr_copy = x_null_ptr_copy.with_addr(x_ptr.addr_without_provenance()); // addr of x and provenance of x
     assert_eq!(unsafe { *x_ptr_copy }, 42);
 }
diff --git a/src/tools/miri/tests/pass/shims/ptr_mask.rs b/src/tools/miri/tests/pass/shims/ptr_mask.rs
index fb8bb6b13dbc2..0e8ea1a91dc9d 100644
--- a/src/tools/miri/tests/pass/shims/ptr_mask.rs
+++ b/src/tools/miri/tests/pass/shims/ptr_mask.rs
@@ -7,10 +7,10 @@ fn main() {
 
     // u32 is 4 aligned,
     // so the lower `log2(4) = 2` bits of the address are always 0
-    assert_eq!(ptr.addr() & 0b11, 0);
+    assert_eq!(ptr.addr_without_provenance() & 0b11, 0);
 
     let tagged_ptr = ptr.map_addr(|a| a | 0b11);
-    let tag = tagged_ptr.addr() & 0b11;
+    let tag = tagged_ptr.addr_without_provenance() & 0b11;
     let masked_ptr = tagged_ptr.mask(!0b11);
 
     assert_eq!(tag, 0b11);
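`ptr.mask`, exercised by the ptr_mask test above, is the provenance-preserving way to strip tag bits: it behaves like `map_addr(|a| a & mask)`, so the result can still be dereferenced, unlike a pointer rebuilt from a bare integer. A small sketch, assuming the nightly `ptr_mask` and `strict_provenance` gates of this era:

    #![feature(ptr_mask, strict_provenance)]

    fn main() {
        let v = 0u32;
        let ptr = &v as *const u32;

        // Tag the two alignment bits, then strip them again with `mask`.
        let tagged = ptr.map_addr(|a| a | 0b11);
        let untagged = tagged.mask(!0b11);

        // Provenance survived the tag/untag round-trip, so this read is legal.
        assert_eq!(unsafe { *untagged }, 0);
    }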
diff --git a/src/tools/miri/tests/pass/transmute_ptr.rs b/src/tools/miri/tests/pass/transmute_ptr.rs
index ce6d86b7068a0..42a5c1c6a5b17 100644
--- a/src/tools/miri/tests/pass/transmute_ptr.rs
+++ b/src/tools/miri/tests/pass/transmute_ptr.rs
@@ -42,7 +42,7 @@ fn ptr_in_two_halves() {
     // Now target_arr[1] is a mix of the two `ptr` we had stored in `arr`.
     let strange_ptr = target_arr[1];
     // Check that the provenance works out.
-    assert_eq!(*strange_ptr.with_addr(ptr.addr()), 0);
+    assert_eq!(*strange_ptr.with_addr(ptr.addr_without_provenance()), 0);
     }
 }
diff --git a/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs b/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs
index d4a74b3d78283..5e4264711dc75 100644
--- a/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs
+++ b/tests/codegen/issues/issue-103285-ptr-addr-overflow-check.rs
@@ -7,8 +7,8 @@ pub fn test(src: *const u8, dst: *const u8) -> usize {
     // CHECK-LABEL: @test(
     // CHECK-NOT: panic
 
-    let src_usize = src.addr();
-    let dst_usize = dst.addr();
+    let src_usize = src.addr_without_provenance();
+    let dst_usize = dst.addr_without_provenance();
     if src_usize > dst_usize {
         return src_usize - dst_usize;
     }
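The codegen test works because, once both addresses are plain `usize`s, the `src_usize > dst_usize` guard lets LLVM prove the subtraction cannot overflow, so no panic branch is emitted. When two pointers are known to point into the same allocation, the provenance-respecting way to measure their distance is `offset_from`/`byte_offset_from` rather than subtracting raw addresses; a sketch:

    /// Distance in bytes between two pointers into the same allocation.
    /// Safety: both pointers must be derived from the same allocated object,
    /// per the contract of `byte_offset_from`.
    unsafe fn byte_distance(a: *const u8, b: *const u8) -> usize {
        a.byte_offset_from(b).unsigned_abs()
    }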
diff --git a/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr b/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr
index aa151fe2d214e..0284d345ec51b 100644
--- a/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr
+++ b/tests/ui/lint/lint-strict-provenance-lossy-casts.stderr
@@ -4,16 +4,16 @@ error: under strict provenance it is considered bad style to cast pointer `*cons
 LL |     let addr: usize = &x as *const u8 as usize;
    |                       ^^^^^^^^^^^^^^^^^^^^^^^^
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+   = help: if you need to cast the address back to an integer later, use `.expose_addr()` instead
 note: the lint level is defined here
   --> $DIR/lint-strict-provenance-lossy-casts.rs:2:9
    |
 LL | #![deny(lossy_provenance_casts)]
    |         ^^^^^^^^^^^^^^^^^^^^^^
-help: use `.addr()` to obtain the address of a pointer
+help: use `.addr_without_provenance()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
    |
-LL |     let addr: usize = (&x as *const u8).addr();
-   |                       +               ~~~~~~~~
+LL |     let addr: usize = (&x as *const u8).addr_without_provenance();
+   |                       +               ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 error: under strict provenance it is considered bad style to cast pointer `*const u8` to integer `u32`
   --> $DIR/lint-strict-provenance-lossy-casts.rs:9:22
@@ -21,31 +21,35 @@ error: under strict provenance it is considered bad style to cast pointer `*cons
 LL |     let addr_32bit = &x as *const u8 as u32;
    |                      ^^^^^^^^^^^^^^^^^^^^^^
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
-help: use `.addr()` to obtain the address of a pointer
+   = help: if you need to cast the address back to an integer later, use `.expose_addr()` instead
+help: use `.addr_without_provenance()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
    |
-LL |     let addr_32bit = (&x as *const u8).addr() as u32;
-   |                      +               ~~~~~~~~~~~~~~~
+LL |     let addr_32bit = (&x as *const u8).addr_without_provenance() as u32;
+   |                      +               ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 error: under strict provenance it is considered bad style to cast pointer `*const u8` to integer `usize`
   --> $DIR/lint-strict-provenance-lossy-casts.rs:14:20
    |
 LL |     let ptr_addr = ptr as usize;
-   |                    ^^^---------
-   |                    |
-   |                    help: use `.addr()` to obtain the address of a pointer: `.addr()`
+   |                    ^^^^^^^^^^^^
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+   = help: if you need to cast the address back to an integer later, use `.expose_addr()` instead
+help: use `.addr_without_provenance()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
+   |
+LL |     let ptr_addr = ptr.addr_without_provenance();
+   |                       ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 error: under strict provenance it is considered bad style to cast pointer `*const u8` to integer `u32`
   --> $DIR/lint-strict-provenance-lossy-casts.rs:16:26
    |
 LL |     let ptr_addr_32bit = ptr as u32;
-   |                          ^^^-------
-   |                          |
-   |                          help: use `.addr()` to obtain the address of a pointer: `.addr() as u32`
+   |                          ^^^^^^^^^^
+   |
+   = help: if you need to cast the address back to an integer later, use `.expose_addr()` instead
+help: use `.addr_without_provenance()` to obtain the address of a pointer without its provenance -- but note that this cannot be cast back to a pointer later; you need to use `with_addr` instead
    |
-   = help: if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+LL |     let ptr_addr_32bit = ptr.addr_without_provenance() as u32;
+   |                          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 error: aborting due to 4 previous errors
 
diff --git a/tests/ui/mir/alignment/i686-pc-windows-msvc.rs b/tests/ui/mir/alignment/i686-pc-windows-msvc.rs
index 379f61ae818f2..2d7cc9e402007 100644
--- a/tests/ui/mir/alignment/i686-pc-windows-msvc.rs
+++ b/tests/ui/mir/alignment/i686-pc-windows-msvc.rs
@@ -14,8 +14,8 @@ fn main() {
     let ptr = x.as_mut_ptr();
     unsafe {
         let misaligned = ptr.byte_add(4);
-        assert!(misaligned.addr() % 8 != 0);
-        assert!(misaligned.addr() % 4 == 0);
+        assert!(misaligned.addr_without_provenance() % 8 != 0);
+        assert!(misaligned.addr_without_provenance() % 4 == 0);
         *misaligned = 42;
     }
 }
diff --git a/tests/ui/structs-enums/type-sizes.rs b/tests/ui/structs-enums/type-sizes.rs
index 92060e3cade3c..156fe97021ec7 100644
--- a/tests/ui/structs-enums/type-sizes.rs
+++ b/tests/ui/structs-enums/type-sizes.rs
@@ -318,7 +318,10 @@ pub fn main() {
     // Currently the layout algorithm will choose the latter because it doesn't attempt
     // to aggregate multiple smaller fields to move a niche before a higher-alignment one.
     let b = BoolInTheMiddle( NonZeroU16::new(1).unwrap(), true, 0);
-    assert!(ptr::from_ref(&b.1).addr() > ptr::from_ref(&b.2).addr());
+    assert!(
+        ptr::from_ref(&b.1).addr_without_provenance()
+            > ptr::from_ref(&b.2).addr_without_provenance()
+    );
 
     assert_eq!(size_of::<Option<BoolInTheMiddle>>(), size_of::<BoolInTheMiddle>());
 
@@ -331,7 +334,8 @@ pub fn main() {
     // Neither field has a niche at the beginning so the layout algorithm should try move niches to
     // the end which means the 8-sized field shouldn't be alignment-promoted before the 4-sized one.
     let v = ReorderEndNiche { a: EndNiche8([0; 7], false), b: MiddleNiche4(0, 0, false, 0) };
-    assert!(ptr::from_ref(&v.a).addr() > ptr::from_ref(&v.b).addr());
-
-
+    assert!(
+        ptr::from_ref(&v.a).addr_without_provenance()
+            > ptr::from_ref(&v.b).addr_without_provenance()
+    );
 }
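Taken together, the migration story for downstream code is mechanical: the lossy `as usize` cast becomes an explicit choice between discarding provenance and exposing it. A summary sketch — `addr_without_provenance` is this PR's rename of `addr`, and `other` stands in for any pointer whose provenance is known to be the right one:

    fn migrate(ptr: *const u8, other: *const u8) {
        // Just need the integer: discard provenance explicitly.
        // The result cannot be cast back into a usable pointer.
        let a = ptr.addr_without_provenance();

        // Need a pointer back later, option 1: re-attach the provenance
        // of a pointer you still have to the saved address.
        let p1 = other.with_addr(a);

        // Option 2: expose the provenance up front and round-trip through
        // an integer, at the cost of weaker optimization/analysis guarantees.
        let exposed = ptr.expose_addr();
        let p2 = std::ptr::from_exposed_addr::<u8>(exposed);

        let _ = (p1, p2);
    }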