118 changes: 103 additions & 15 deletions compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -280,22 +280,110 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
interp_ok(match (a, b) {
// Comparisons between integers are always known.
(Scalar::Int(a), Scalar::Int(b)) => (a == b) as u8,
// Comparisons of null with an arbitrary scalar can be known if `scalar_may_be_null`
// indicates that the scalar can definitely *not* be null.
(Scalar::Int(int), ptr) | (ptr, Scalar::Int(int))
if int.is_null() && !self.scalar_may_be_null(ptr)? =>
{
0
// Comparing a pointer `ptr` with an integer `int` is equivalent to comparing
// `ptr-int` with null, so we can reduce this case to a `scalar_may_be_null` test.
(Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
Review comment (Member):

Another possible implementation here would be to compute ptr.wrapping_sub(int) and then compare that against null. scalar_may_be_null already handles alignment so that should cover the same cases and more, I think?

Reply (Contributor Author):
Added in 2a24220
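
A minimal stand-alone sketch of that reduction (plain runtime Rust, not the interpreter code; the helper name is made up): comparing a pointer's address against an integer `n` is the same as asking whether the pointer shifted down by `n` bytes lands on address 0.

    fn ptr_eq_int(p: *const u8, n: usize) -> bool {
        // `p.addr() == n` exactly when shifting `p` down by `n` bytes reaches address 0.
        p.wrapping_sub(n).addr() == 0
    }

    fn main() {
        let x = 42u8;
        let p: *const u8 = &x;
        assert!(ptr_eq_int(p, p.addr()));
        assert!(!ptr_eq_int(p, p.addr().wrapping_add(1)));
    }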

let int = int.to_target_usize(*self.tcx);
// The `wrapping_neg` here may produce a value that is not
// a valid target usize any more... but `wrapping_offset` handles that correctly.
let offset_ptr = ptr.wrapping_offset(Size::from_bytes(int.wrapping_neg()), self);
if !self.scalar_may_be_null(Scalar::from_pointer(offset_ptr, self))? {
// `ptr.wrapping_sub(int)` is definitely not equal to `0`, so `ptr != int`
0
} else {
// `ptr.wrapping_sub(int)` could be equal to `0`, but might not be,
// so we cannot know for sure if `ptr == int` or not
2
}
}
(Scalar::Ptr(a, _), Scalar::Ptr(b, _)) => {
let (a_prov, a_offset) = a.prov_and_relative_offset();
let (b_prov, b_offset) = b.prov_and_relative_offset();
let a_allocid = a_prov.alloc_id();
let b_allocid = b_prov.alloc_id();
let a_info = self.get_alloc_info(a_allocid);
let b_info = self.get_alloc_info(b_allocid);

// Check if the pointers cannot be equal due to alignment
if a_info.align > Align::ONE && b_info.align > Align::ONE {
let min_align = Ord::min(a_info.align.bytes(), b_info.align.bytes());
let a_residue = a_offset.bytes() % min_align;
let b_residue = b_offset.bytes() % min_align;
if a_residue != b_residue {
// If the two pointers have a different residue modulo their
// common alignment, they cannot be equal.
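// E.g. (illustration only): with min_align == 2, offsets 3 and 4 have
// residues 1 and 0, so the two pointers can never share an address,
// wherever their allocations end up.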
return interp_ok(0);
}
// The pointers have the same residue modulo their common alignment,
// so they could be equal. Try the other checks.
}

if let (Some(GlobalAlloc::Static(a_did)), Some(GlobalAlloc::Static(b_did))) = (
self.tcx.try_get_global_alloc(a_allocid),
self.tcx.try_get_global_alloc(b_allocid),
) {
if a_allocid == b_allocid {
debug_assert_eq!(
a_did, b_did,
"different static item DefIds had same AllocId? {a_allocid:?} == {b_allocid:?}, {a_did:?} != {b_did:?}"
);
// Comparing two pointers into the same static. As per
// https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
// a static cannot be duplicated, so if two pointers are into the same
// static, they are equal if and only if their offsets are equal.
(a_offset == b_offset) as u8
} else {
debug_assert_ne!(
a_did, b_did,
"same static item DefId had two different AllocIds? {a_allocid:?} != {b_allocid:?}, {a_did:?} == {b_did:?}"
);
// Comparing two pointers into two different statics.
// We can never determine for sure that two pointers into different statics
// are *equal*, but we can know that they are *unequal* if they are both
// strictly in-bounds (i.e. in-bounds and not one-past-the-end) of
// their respective static, as different non-zero-sized statics cannot
// overlap or be deduplicated as per
// https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.intro
// (non-deduplication), and
// https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
// (non-overlapping).
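// E.g. (illustration only): pointers to the start of two distinct non-ZST
// statics are both strictly in-bounds and thus definitely unequal, while a
// pointer to one static vs. one-past-the-end of another stays undetermined.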
if a_offset < a_info.size && b_offset < b_info.size {
0
} else {
// Otherwise, conservatively say we don't know.
// There are some cases we could still return `0` for, e.g.
// if the pointers being equal would require their statics to overlap
// one or more bytes, but for simplicity we currently only check
// strictly in-bounds pointers.
2
}
}
} else {
// In all other cases we conservatively say we don't know.
//
// When comparing statics to non-statics: as per https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
// immutable statics can sometimes overlap with other kinds of allocations.
//
// FIXME: We could be more decisive for (non-zero-sized) mutable statics,
// which cannot overlap with other kinds of allocations.
//
// Functions and vtables can be duplicated and deduplicated, so we
// cannot be sure of runtime equality of pointers to the same one, or the
// runtime inequality of pointers to different ones (see e.g. #73722),
// so comparing those should return 2, whether they are the same allocation
// or not.
//
// `GlobalAlloc::TypeId` exists mostly to prevent consteval from comparing
// `TypeId`s, so comparing those should always return 2, whether they are the
// same allocation or not.
//
// FIXME: We could revisit comparing pointers into the same
// `GlobalAlloc::Memory` once https://github.com/rust-lang/rust/issues/128775
// is fixed (but they can be deduplicated, so comparing pointers into different
// ones should return 2).
2
}
}
// Other ways of comparing integers and pointers can never be known for sure.
(Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
// FIXME: return a `1` for when both sides are the same pointer, *except* that
// some things (like functions and vtables) do not have stable addresses
// so we need to be careful around them (see e.g. #73722).
// FIXME: return `0` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
// Examples include comparison of addresses in different static items.
(Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
})
}
}
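
For orientation, a rough sketch of how a 0/1/2 answer from this hook surfaces through the user-facing `<*const T>::guaranteed_eq` used in the test below (assuming the current mapping in `core`):

    // Sketch only (assumed mapping): 1 => definitely equal, 0 => definitely unequal,
    // 2 => cannot be determined at compile time.
    fn to_guaranteed_eq(cmp: u8) -> Option<bool> {
        match cmp {
            2 => None,
            other => Some(other == 1),
        }
    }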
222 changes: 192 additions & 30 deletions tests/ui/consts/ptr_comparisons.rs
@@ -1,43 +1,205 @@
//@ compile-flags: --crate-type=lib
//@ check-pass
//@ edition: 2024
#![feature(const_raw_ptr_comparison)]
#![feature(fn_align)]
// Generally:
// For any `Some` return, `None` would also be valid, unless otherwise noted.
// For any `None` return, only `None` is valid, unless otherwise noted.

#![feature(
core_intrinsics,
const_raw_ptr_comparison,
)]
macro_rules! do_test {
($a:expr, $b:expr, $expected:pat) => {
const _: () = {
let a: *const _ = $a;
let b: *const _ = $b;
assert!(matches!(<*const u8>::guaranteed_eq(a.cast(), b.cast()), $expected));
};
};
}

const FOO: &usize = &42;
#[repr(align(2))]
struct T(#[allow(unused)] u16);

macro_rules! check {
(eq, $a:expr, $b:expr) => {
pub const _: () =
assert!(std::intrinsics::ptr_guaranteed_cmp($a as *const u8, $b as *const u8) == 1);
};
(ne, $a:expr, $b:expr) => {
pub const _: () =
assert!(std::intrinsics::ptr_guaranteed_cmp($a as *const u8, $b as *const u8) == 0);
#[repr(align(2))]
struct AlignedZst;

static A: T = T(42);
static B: T = T(42);
static mut MUT_STATIC: T = T(42);
static ZST: () = ();
static ALIGNED_ZST: AlignedZst = AlignedZst;
static LARGE_WORD_ALIGNED: [usize; 2] = [0, 1];
static mut MUT_LARGE_WORD_ALIGNED: [usize; 2] = [0, 1];

const FN_PTR: *const () = {
fn foo() {}
unsafe { std::mem::transmute(foo as fn()) }
};

const ALIGNED_FN_PTR: *const () = {
#[rustc_align(2)]
fn aligned_foo() {}
unsafe { std::mem::transmute(aligned_foo as fn()) }
};

trait Trait {
#[allow(unused)]
fn method(&self) -> u8;
}
impl Trait for u32 {
fn method(&self) -> u8 { 1 }
}
impl Trait for i32 {
fn method(&self) -> u8 { 2 }
}

const VTABLE_PTR_1: *const () = {
let [_data, vtable] = unsafe {
std::mem::transmute::<&dyn Trait, [*const (); 2]>(&42_u32 as &dyn Trait)
};
(!, $a:expr, $b:expr) => {
pub const _: () =
assert!(std::intrinsics::ptr_guaranteed_cmp($a as *const u8, $b as *const u8) == 2);
vtable
};
const VTABLE_PTR_2: *const () = {
let [_data, vtable] = unsafe {
std::mem::transmute::<&dyn Trait, [*const (); 2]>(&42_i32 as &dyn Trait)
};
}
vtable
};

check!(eq, 0, 0);
check!(ne, 0, 1);
check!(ne, FOO as *const _, 0);
check!(ne, unsafe { (FOO as *const usize).offset(1) }, 0);
check!(ne, unsafe { (FOO as *const usize as *const u8).offset(3) }, 0);
// Cannot be `None`: `is_null` is stable with strong guarantees about integer-valued pointers.
do_test!(0 as *const u8, 0 as *const u8, Some(true));
do_test!(0 as *const u8, 1 as *const u8, Some(false));

// We want pointers to be equal to themselves, but aren't checking this yet because
// there are some open questions (e.g. whether function pointers to the same function
// compare equal: they don't necessarily do at runtime).
check!(!, FOO as *const _, FOO as *const _);
// Integer-valued pointers can always be compared.
do_test!(1 as *const u8, 1 as *const u8, Some(true));
do_test!(1 as *const u8, 2 as *const u8, Some(false));

// Cannot be `None`: the addresses of `static`s and references (including pointers within
// and one-past-the-end of those) and `fn` pointers cannot be null, and `is_null` is stable
// with strong guarantees and is implemented using `guaranteed_cmp`.
do_test!(&A, 0 as *const u8, Some(false));
do_test!((&raw const A).cast::<u8>().wrapping_add(1), 0 as *const u8, Some(false));
Review comment (Member):

Please also add a case that adds 11 or so to ensure we hit the alignment code path, not the inbounds code path.

Reply (Contributor Author):
Added in edbafa2 (as size_of::<T>() + 1)

// This pointer is out-of-bounds, but still cannot be equal to 0 because of alignment.
do_test!((&raw const A).cast::<u8>().wrapping_add(size_of::<T>() + 1), 0 as *const u8, Some(false));

do_test!((&raw const A).wrapping_add(1), 0 as *const u8, Some(false));
do_test!(&ZST, 0 as *const u8, Some(false));
do_test!(&(), 0 as *const u8, Some(false));
do_test!(const { &() }, 0 as *const u8, Some(false));
do_test!(FN_PTR, 0 as *const u8, Some(false));

// aside from 0, these pointers might end up pretty much anywhere.
check!(!, FOO as *const _, 1); // this one could be `ne` by taking into account alignment
check!(!, FOO as *const _, 1024);
do_test!(&A, align_of::<T>() as *const u8, None);
do_test!((&raw const A).wrapping_byte_add(1), (align_of::<T>() + 1) as *const u8, None);

// except that they must still be aligned
do_test!(&A, 1 as *const u8, Some(false));
do_test!((&raw const A).wrapping_byte_add(1), align_of::<T>() as *const u8, Some(false));

// If `ptr.wrapping_sub(int)` cannot be null (because it is in-bounds or one-past-the-end of
// `ptr`'s allocation, or because it is misaligned from `ptr`'s allocation), then we know that
// `ptr != int`, even if `ptr` itself is out-of-bounds or one-past-the-end of its allocation.
do_test!((&raw const A).wrapping_byte_add(1), 1 as *const u8, Some(false));
do_test!((&raw const A).wrapping_byte_add(2), 2 as *const u8, Some(false));
do_test!((&raw const A).wrapping_byte_add(3), 1 as *const u8, Some(false));
do_test!((&raw const ZST).wrapping_byte_add(1), 1 as *const u8, Some(false));
do_test!(VTABLE_PTR_1.wrapping_byte_add(1), 1 as *const u8, Some(false));
do_test!(FN_PTR.wrapping_byte_add(1), 1 as *const u8, Some(false));
do_test!(&A, size_of::<T>().wrapping_neg() as *const u8, Some(false));
do_test!(&LARGE_WORD_ALIGNED, size_of::<usize>().wrapping_neg() as *const u8, Some(false));
// (`ptr - int != 0` due to misalignment)
do_test!((&raw const A).wrapping_byte_add(2), 1 as *const u8, Some(false));
do_test!((&raw const ALIGNED_ZST).wrapping_byte_add(2), 1 as *const u8, Some(false));

// When pointers go out-of-bounds, they *might* become null, so these comparisons cannot work.
check!(!, unsafe { (FOO as *const usize).wrapping_add(2) }, 0);
check!(!, unsafe { (FOO as *const usize).wrapping_sub(1) }, 0);
do_test!((&raw const A).wrapping_add(2), 0 as *const u8, None);
do_test!((&raw const A).wrapping_sub(1), 0 as *const u8, None);

// Statics cannot be duplicated
do_test!(&A, &A, Some(true));

// Two non-ZST statics cannot have the same address
do_test!(&A, &B, Some(false));
do_test!(&A, &raw const MUT_STATIC, Some(false));

// One-past-the-end of one static can be equal to the address of another static.
do_test!(&A, (&raw const B).wrapping_add(1), None);

// Cannot know whether a ZST static is at the same address as anything non-null (if alignment allows).
do_test!(&A, &ZST, None);
do_test!(&A, &ALIGNED_ZST, None);

// Unclear if ZST statics can be placed "in the middle of" non-ZST statics.
// For now, we conservatively say they could, and return None here.
do_test!(&ZST, (&raw const A).wrapping_byte_add(1), None);

// As per https://doc.rust-lang.org/nightly/reference/items/static-items.html#r-items.static.storage-disjointness
// immutable statics are allowed to overlap with const items and promoteds.
do_test!(&A, &T(42), None);
do_test!(&A, const { &T(42) }, None);
do_test!(&A, { const X: T = T(42); &X }, None);

// These could return Some(false), since only immutable statics can overlap with const items
// and promoteds.
do_test!(&raw const MUT_STATIC, &T(42), None);
do_test!(&raw const MUT_STATIC, const { &T(42) }, None);
do_test!(&raw const MUT_STATIC, { const X: T = T(42); &X }, None);

// An odd offset from a 2-aligned allocation can never be equal to an even offset from a
// 2-aligned allocation, even if the offsets are out-of-bounds.
do_test!(&A, (&raw const B).wrapping_byte_add(1), Some(false));
do_test!(&A, (&raw const B).wrapping_byte_add(5), Some(false));
do_test!(&A, (&raw const ALIGNED_ZST).wrapping_byte_add(1), Some(false));
do_test!(&ALIGNED_ZST, (&raw const A).wrapping_byte_add(1), Some(false));
do_test!(&A, (&T(42) as *const T).wrapping_byte_add(1), Some(false));
do_test!(&A, (const { &T(42) } as *const T).wrapping_byte_add(1), Some(false));
do_test!(&A, ({ const X: T = T(42); &X } as *const T).wrapping_byte_add(1), Some(false));

// We could return `Some(false)` for these, as pointers to different statics can never be equal if
// that would require the statics to overlap, even if the pointers themselves are offset out of
// bounds or one-past-the-end. We currently only check strictly in-bounds pointers when comparing
// pointers to different statics, however.
do_test!((&raw const A).wrapping_add(1), (&raw const B).wrapping_add(1), None);
do_test!(
(&raw const LARGE_WORD_ALIGNED).cast::<usize>().wrapping_add(2),
(&raw const MUT_LARGE_WORD_ALIGNED).cast::<usize>().wrapping_add(1),
None
);

// Pointers into the same static are equal if and only if their offsets are equal,
// even if either is out-of-bounds.
do_test!(&A, &A, Some(true));
do_test!(&A, &A.0, Some(true));
do_test!(&A, (&raw const A).wrapping_byte_add(1), Some(false));
do_test!(&A, (&raw const A).wrapping_byte_add(2), Some(false));
do_test!(&A, (&raw const A).wrapping_byte_add(51), Some(false));
do_test!((&raw const A).wrapping_byte_add(51), (&raw const A).wrapping_byte_add(51), Some(true));

// Pointers to the same fn may be unequal, since `fn`s can be duplicated.
do_test!(FN_PTR, FN_PTR, None);
do_test!(ALIGNED_FN_PTR, ALIGNED_FN_PTR, None);

// Pointers to different fns may be equal, since `fn`s can be deduplicated.
do_test!(FN_PTR, ALIGNED_FN_PTR, None);

// Pointers to the same vtable may be unequal, since vtables can be duplicated.
do_test!(VTABLE_PTR_1, VTABLE_PTR_1, None);

// Pointers to different vtables may be equal, since vtables can be deduplicated.
do_test!(VTABLE_PTR_1, VTABLE_PTR_2, None);

// Function pointers to aligned function allocations are not necessarily actually aligned,
// due to platform-specific semantics.
// See https://github.com/rust-lang/rust/issues/144661
// FIXME: This could return `Some` on platforms where function pointers' addresses actually
// correspond to function addresses including alignment, or on platforms where all functions
// are aligned to some amount (e.g. ARM where a32 function pointers are at least 4-aligned,
// and t32 function pointers are 2-aligned-offset-by-1).
do_test!(ALIGNED_FN_PTR, ALIGNED_FN_PTR.wrapping_byte_offset(1), None);

// Conservatively say we don't know.
do_test!(FN_PTR, VTABLE_PTR_1, None);
do_test!((&raw const LARGE_WORD_ALIGNED).cast::<usize>().wrapping_add(1), VTABLE_PTR_1, None);
do_test!((&raw const MUT_LARGE_WORD_ALIGNED).cast::<usize>().wrapping_add(1), VTABLE_PTR_1, None);
do_test!((&raw const LARGE_WORD_ALIGNED).cast::<usize>().wrapping_add(1), FN_PTR, None);
do_test!((&raw const MUT_LARGE_WORD_ALIGNED).cast::<usize>().wrapping_add(1), FN_PTR, None);