//@ run-pass

// We need some non-1 alignment to test that we use the alignment of the type in the compiler.
#[repr(align(4))]
struct Foo;
56
67static FOO : Foo = Foo ;
78
9+ // This tests for regression of https://github.com/rust-lang/rust/issues/147516
10+ //
11+ // THe compiler will codegen `&Zst` without creating a real allocation, just a properly aligned
12+ // `usize` (i.e., ptr::dangling). However, code can add an arbitrary offset from that base
13+ // allocation. We confirm here that we correctly codegen that offset combined with the necessary
14+ // alignment of the base &() as a 1-ZST and &Foo as a 4-ZST.
15+ const A : * const ( ) = ( & ( ) as * const ( ) ) . wrapping_byte_add ( 2 ) ;
16+ const B : * const ( ) = ( & Foo as * const _ as * const ( ) ) . wrapping_byte_add ( usize:: MAX ) ;
17+ const C : * const ( ) = ( & Foo as * const _ as * const ( ) ) . wrapping_byte_add ( 2 ) ;
18+
819fn main ( ) {
920 // There's no stable guarantee that these are true.
1021 // However, we want them to be true so that our LLVM IR and runtime are a bit faster:
@@ -15,6 +26,13 @@ fn main() {
1526 let x: & ' static Foo = & Foo ;
1627 assert_eq ! ( x as * const Foo as usize , 4 ) ;
1728
29+ // * A 1-aligned ZST (1-ZST) is placed at 0x1. Then offsetting that by 2 results in 3.
30+ // * Foo is a 4-aligned ZST, so is placed at 0x4. +2 = 6
31+ // * Foo is a 4-aligned ZST, so is placed at 0x4. +usize::MAX = -1 (same bit pattern) = 3
32+ assert_eq ! ( A . addr( ) , 3 ) ;
33+ assert_eq ! ( B . addr( ) , 3 ) ;
34+ assert_eq ! ( C . addr( ) , 6 ) ;
35+
1836 // The exact addresses returned by these library functions are not necessarily stable guarantees
1937 // but for now we assert that we're still matching.
2038 #[ allow( dangling_pointers_from_temporaries) ]