@@ -67,7 +67,7 @@
 use crate::cmp::Ordering;
 use crate::fmt;
 use crate::hash;
-use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping};
+use crate::intrinsics::{self, abort, is_aligned_and_not_null};
 use crate::mem::{self, MaybeUninit};
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -394,7 +394,8 @@ pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
     // Give ourselves some scratch space to work with.
     // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
     let mut tmp = MaybeUninit::<T>::uninit();
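With this hunk, `ptr::swap` becomes callable during const evaluation on nightly. A minimal usage sketch, assuming a nightly toolchain with the unstable `const_swap` feature from this change plus `const_mut_refs` for the mutable locals (the exact feature set may vary by nightly):

```rust
#![feature(const_swap, const_mut_refs)]

use std::ptr;

const SWAPPED: (i32, i32) = {
    let mut a = 1;
    let mut b = 2;
    // SAFETY: `a` and `b` are distinct locals, so both pointers are
    // valid, aligned, initialized, and never alias each other.
    unsafe { ptr::swap(&mut a, &mut b) };
    (a, b)
};

fn main() {
    assert_eq!(SWAPPED, (2, 1));
}
```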
@@ -451,16 +452,8 @@ pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
 /// ```
 #[inline]
 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
-pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
-    if cfg!(debug_assertions)
-        && !(is_aligned_and_not_null(x)
-            && is_aligned_and_not_null(y)
-            && is_nonoverlapping(x, y, count))
-    {
-        // Not panicking to keep codegen impact smaller.
-        abort();
-    }
-
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
     let len = mem::size_of::<T>() * count;
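The `cfg!(debug_assertions)` guard is dropped here (which is also why the `is_nonoverlapping` import disappears in the first hunk): the check compares raw pointers as integer addresses, and pointer-to-integer casts cannot run in const evaluation. An illustrative stand-in for the dropped overlap check (hypothetical, not the library's exact code):

```rust
use core::mem;

/// The two `count`-element regions starting at `x` and `y` are
/// disjoint iff their start addresses are at least one whole region
/// length apart.
fn is_nonoverlapping<T>(x: *const T, y: *const T, count: usize) -> bool {
    // These pointer-to-usize casts are the const-incompatible step.
    let x = x as usize;
    let y = y as usize;
    let len = mem::size_of::<T>() * count;
    let diff = if x > y { x - y } else { y - x };
    diff >= len
}
```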
@@ -470,7 +463,8 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
 }
 
 #[inline]
-pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub(crate) const unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
     if mem::size_of::<T>() < 32 {
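The small-type fast path the comment refers to boils down to a read/copy/write triple. A sketch under the same safety contract (the helper name here is illustrative):

```rust
use core::ptr;

// Sketch of the `size_of::<T>() < 32` branch: stage one value on the
// stack, then two nonoverlapping copies complete the swap.
// SAFETY: `x` and `y` must be valid, aligned, initialized, and must
// not overlap, exactly as `swap_nonoverlapping_one` requires.
unsafe fn swap_one_small<T>(x: *mut T, y: *mut T) {
    let z = ptr::read(x); // x -> tmp (bitwise copy, no drop)
    ptr::copy_nonoverlapping(y, x, 1); // y -> x
    ptr::write(y, z); // tmp -> y (does not drop the old value)
}
```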
@@ -488,7 +482,8 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
 }
 
 #[inline]
-unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
     // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
     // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
     // Haswell E processors. LLVM is more able to optimize if we give a struct a
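The block strategy the comment describes stages 32 bytes at a time through a stack temporary whose type LLVM can lower to wide vector loads and stores. An illustrative (not verbatim) sketch of one block swap; the `Block` and `swap_block` names are stand-ins, and the standard library's internal block type differs in detail:

```rust
use core::mem::{self, MaybeUninit};
use core::ptr;

// A 32-byte chunk; any 32-byte type gives LLVM a wide unit to vectorize.
#[repr(C)]
struct Block(u64, u64, u64, u64);

// Swap one 32-byte block: x -> tmp, y -> x, tmp -> y.
// SAFETY: both pointers must be valid for 32 bytes and must not overlap.
unsafe fn swap_block(x: *mut u8, y: *mut u8) {
    let mut t = MaybeUninit::<Block>::uninit();
    let t = t.as_mut_ptr() as *mut u8;
    ptr::copy_nonoverlapping(x, t, mem::size_of::<Block>());
    ptr::copy_nonoverlapping(y, x, mem::size_of::<Block>());
    ptr::copy_nonoverlapping(t, y, mem::size_of::<Block>());
}
```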
@@ -589,7 +584,8 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
+#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
+pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
     // SAFETY: the caller must guarantee that `dst` is valid to be
     // cast to a mutable reference (valid for writes, aligned, initialized),
     // and cannot overlap `src` since `dst` must point to a distinct
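`ptr::replace` gets the same treatment under its own tracking issue. A hedged usage sketch, again assuming a nightly with the `const_replace` feature from this change plus `const_mut_refs` for the mutable local:

```rust
#![feature(const_replace, const_mut_refs)]

use std::ptr;

// `replace` writes `2` into `v` and returns the previous value, `1`,
// all during const evaluation.
const OLD: i32 = {
    let mut v = 1;
    // SAFETY: `v` is a valid, aligned, initialized local, and the
    // moved-in `src` value cannot overlap it.
    unsafe { ptr::replace(&mut v, 2) }
};

fn main() {
    assert_eq!(OLD, 1);
}
```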