From b6540777fed3ca92a7c306be6c6ab4b074a033b2 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Tue, 6 Dec 2022 00:07:28 -0500 Subject: [PATCH 1/5] cg_llvm: remove pointee types and pointercast/bitcast-of-ptr --- compiler/rustc_codegen_llvm/src/abi.rs | 17 +--- compiler/rustc_codegen_llvm/src/allocator.rs | 2 +- compiler/rustc_codegen_llvm/src/back/write.rs | 7 +- compiler/rustc_codegen_llvm/src/base.rs | 3 +- compiler/rustc_codegen_llvm/src/builder.rs | 35 +++----- compiler/rustc_codegen_llvm/src/callee.rs | 36 +-------- compiler/rustc_codegen_llvm/src/common.rs | 12 +-- compiler/rustc_codegen_llvm/src/consts.rs | 16 ++-- compiler/rustc_codegen_llvm/src/context.rs | 54 +++++-------- .../rustc_codegen_llvm/src/debuginfo/gdb.rs | 3 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 81 +++++++------------ compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 2 +- compiler/rustc_codegen_llvm/src/type_.rs | 35 ++++---- compiler/rustc_codegen_llvm/src/type_of.rs | 39 ++------- compiler/rustc_codegen_llvm/src/va_arg.rs | 35 +++----- tests/ui/dyn-star/llvm-old-style-ptrs.rs | 23 ------ tests/ui/limits/issue-17913.stderr | 1 + 17 files changed, 118 insertions(+), 283 deletions(-) delete mode 100644 tests/ui/dyn-star/llvm-old-style-ptrs.rs diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index d221bad28ef96..e02b457fd0b19 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -216,9 +216,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx)); - let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); - bx.store(val, cast_dst, self.layout.align.abi); + bx.store(val, dst.llval, self.layout.align.abi); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -336,7 +334,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx), PassMode::Cast(cast, _) => cast.llvm_type(cx), PassMode::Indirect { .. 
} => { - llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + llargument_tys.push(cx.type_ptr()); cx.type_void() } }; @@ -364,9 +362,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } cast.llvm_type(cx) } - PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => { - cx.type_ptr_to(arg.memory_ty(cx)) - } + PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(), }; llargument_tys.push(llarg_ty); } @@ -379,12 +375,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { - unsafe { - llvm::LLVMPointerType( - self.llvm_type(cx), - cx.data_layout().instruction_address_space.0 as c_uint, - ) - } + cx.type_ptr_ext(cx.data_layout().instruction_address_space) } fn llvm_cconv(&self) -> llvm::CallConv { diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index a57508815d6f8..ca123334fca3a 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -28,7 +28,7 @@ pub(crate) unsafe fn codegen( tws => bug!("Unsupported target word size for int: {}", tws), }; let i8 = llvm::LLVMInt8TypeInContext(llcx); - let i8p = llvm::LLVMPointerType(i8, 0); + let i8p = llvm::LLVMPointerTypeInContext(llcx, 0); let void = llvm::LLVMVoidTypeInContext(llcx); if kind == AllocatorKind::Default { diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 0f5e975445fae..2fba82c026e40 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -4,7 +4,6 @@ use crate::back::profiling::{ }; use crate::base; use crate::common; -use crate::consts; use crate::errors::{ CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode, }; @@ -992,7 +991,7 @@ fn create_msvc_imps( let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx(llcx); + let ptr_ty = Type::ptr_llcx(llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage @@ -1012,8 +1011,8 @@ fn create_msvc_imps( .collect::<Vec<_>>(); for (imp_name, val) in globals { - let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast()); - llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty)); + let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast()); + llvm::LLVMSetInitializer(imp, val); llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 5b5f81c032940..b659fd02eecf6 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen // happen after the llvm.used variables are created.
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMReplaceAllUsesWith(old_g, new_g); llvm::LLVMDeleteGlobal(old_g); } } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index d55992bf092f8..1576cfeba42a0 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -652,7 +652,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) -> &'ll Value { debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); - let ptr = self.check_store(val, ptr); + let ptr = self.check_store(ptr); unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); let align = @@ -682,7 +682,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { size: Size, ) { debug!("Store {:?} -> {:?}", val, ptr); - let ptr = self.check_store(val, ptr); + let ptr = self.check_store(ptr); unsafe { let store = llvm::LLVMRustBuildAtomicStore( self.llbuilder, @@ -873,8 +873,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemCpy( self.llbuilder, @@ -900,8 +898,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemMove( self.llbuilder, @@ -924,7 +920,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) { let is_volatile = flags.contains(MemFlags::VOLATILE); - let ptr = self.pointercast(ptr, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemSet( self.llbuilder, @@ -981,7 +976,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) { - let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let landing_pad = self.landing_pad(ty, pers_fn, 0); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); @@ -990,14 +985,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) { - let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let landing_pad = self.landing_pad(ty, pers_fn, 1); - self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[])); + self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[])); (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1)) } fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) { - let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let 
mut exn = self.const_poison(ty); exn = self.insert_value(exn, exn0, 0); exn = self.insert_value(exn, exn1, 1); @@ -1161,7 +1156,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) }; let llty = self.cx.type_func( - &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()], + &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()], self.cx.type_void(), ); let args = &[fn_name, hash, num_counters, index]; @@ -1387,23 +1382,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchret") } - fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { + fn check_store(&mut self, ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); - let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); - if dest_ptr_ty == stored_ptr_ty { - ptr - } else { - debug!( - "type mismatch in store. \ - Expected {:?}, got {:?}; inserting bitcast", - dest_ptr_ty, stored_ptr_ty - ); - self.bitcast(ptr, stored_ptr_ty) - } + ptr } fn check_call<'b>( @@ -1468,7 +1452,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return; } - let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index 5d3e245f45f08..36c098218cf3b 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -4,13 +4,11 @@ //! and methods are represented as just a fn ptr and not a full //! closure. -use crate::abi::FnAbiLlvmExt; use crate::attributes; use crate::common; use crate::context::CodegenCx; use crate::llvm; use crate::value::Value; -use rustc_codegen_ssa::traits::*; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; @@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); let llfn = if let Some(llfn) = cx.get_declared_value(sym) { - // Create a fn pointer with the new signature. - let llptrty = fn_abi.ptr_to_llvm_type(cx); - - // This is subtle and surprising, but sometimes we have to bitcast - // the resulting fn pointer. The reason has to do with external - // functions. If you have two crates that both bind the same C - // library, they may not use precisely the same types: for - // example, they will probably each declare their own structs, - // which are distinct types from LLVM's point of view (nominal - // types). - // - // Now, if those two crates are linked into an application, and - // they contain inlined code, you can wind up with a situation - // where both of those functions wind up being loaded into this - // application simultaneously. In that case, the same function - // (from LLVM's point of view) requires two types. But of course - // LLVM won't allow one function to have two types. - // - // What we currently do, therefore, is declare the function with - // one of the two types (whichever happens to come first) and then - // bitcast as needed when the function is referenced to make sure - // it has the type we expect. - // - // This can occur on either a crate-local or crate-external - // reference. 
It also occurs when testing libcore and in some - // other weird situations. Annoying. - if cx.val_ty(llfn) != llptrty { - debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - cx.const_ptrcast(llfn, llptrty) - } else { - debug!("get_fn: not casting pointer!"); - llfn - } + llfn } else { let instance_def_id = instance.def_id(); let llfn = if tcx.sess.target.arch == "x86" && diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 4d879fbdcb7a7..d5dc3abf31ed7 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -4,7 +4,6 @@ use crate::consts::{self, const_alloc_to_llvm}; pub use crate::context::CodegenCx; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True}; use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_ast::Mutability; @@ -13,7 +12,6 @@ use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher}; use rustc_hir::def_id::DefId; use rustc_middle::bug; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; -use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::TyCtxt; use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType}; use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer}; @@ -211,11 +209,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { }) .1; let len = s.len(); - let cs = consts::ptrcast( - str_global, - self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)), - ); - (cs, self.const_usize(len as u64)) + (str_global, self.const_usize(len as u64)) } fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value { @@ -292,7 +286,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let llval = unsafe { llvm::LLVMConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)), + self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)), &self.const_usize(offset.bytes()), 1, ) @@ -322,7 +316,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p()), + base_addr, &self.const_usize(offset.bytes()), 1, ) diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index df52f50f86f05..4ccf54cd96d8e 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -103,7 +103,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size), }, - cx.type_i8p_ext(address_space), + cx.type_ptr_ext(address_space), )); next_offset = offset + pointer_size; } @@ -179,7 +179,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( }) }); llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage); - llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty)); + llvm::LLVMSetInitializer(g2, g1); g2 } } else if cx.tcx.sess.target.arch == "x86" && @@ -251,7 +251,7 @@ impl<'ll> CodegenCx<'ll, '_> { let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { let llty = self.layout_of(ty).llvm_type(self); if let Some(g) = self.get_declared_value(sym) { - if self.val_ty(g) != self.type_ptr_to(llty) { + if self.val_ty(g) != self.type_ptr() { span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); } } @@ -552,16 +552,14 @@ impl<'ll> StaticMethods 
for CodegenCx<'ll, '_> { } } - /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*. + /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr. fn add_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; - self.used_statics.borrow_mut().push(cast); + self.used_statics.borrow_mut().push(global); } /// Add a global value to a list to be stored in the `llvm.compiler.used` variable, - /// an array of i8*. + /// an array of ptr. fn add_compiler_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; - self.compiler_used_statics.borrow_mut().push(cast); + self.compiler_used_statics.borrow_mut().push(global); } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 5bf641055c564..cb093996d1d09 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -59,17 +59,6 @@ pub struct CodegenCx<'ll, 'tcx> { /// Cache of constant strings, pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>, - /// Reverse-direction for const ptrs cast from globals. - /// - /// Key is a Value holding a `*T`, - /// Val is a Value holding a `*[T]`. - /// - /// Needed because LLVM loses pointer->pointee association - /// when we ptrcast, and we have to ptrcast during codegen - /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not - /// a pointer to an LLVM array type. Similar for trait objects. - pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>, - /// Cache of emitted const globals (value -> global) pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>, @@ -464,7 +453,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { instances: Default::default(), vtables: Default::default(), const_str_cache: Default::default(), - const_unsized: Default::default(), const_globals: Default::default(), statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), @@ -495,7 +483,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { let section = cstr!("llvm.metadata"); - let array = self.const_array(self.type_ptr_to(self.type_i8()), values); + let array = self.const_array(self.type_ptr(), values); unsafe { let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); @@ -673,7 +661,7 @@ impl<'ll> CodegenCx<'ll, '_> { ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false)) } - let i8p = self.type_i8p(); + let ptr = self.type_ptr(); let void = self.type_void(); let i1 = self.type_i1(); let t_i8 = self.type_i8(); @@ -687,7 +675,7 @@ impl<'ll> CodegenCx<'ll, '_> { let t_metadata = self.type_metadata(); let t_token = self.type_token(); - ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p); + ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr); ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32); ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32); @@ -723,7 +711,7 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.trap", fn() -> void); ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + ifn!("llvm.frameaddress", fn(t_i32) -> ptr); ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); @@ -890,43 +878,43 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64); ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128); - ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) 
-> void); - ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void); + ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void); + ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void); ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32); ifn!("llvm.localescape", fn(...) -> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr); + ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr); ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void); // This isn't an "LLVM intrinsic", but LLVM's optimization passes // recognize it like one and we assume it exists in `core::slice::cmp` match self.sess().target.arch.as_ref() { - "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16), - _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32), + "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16), + _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32), } // variadic intrinsics - ifn!("llvm.va_start", fn(i8p) -> void); - ifn!("llvm.va_end", fn(i8p) -> void); - ifn!("llvm.va_copy", fn(i8p, i8p) -> void); + ifn!("llvm.va_start", fn(ptr) -> void); + ifn!("llvm.va_end", fn(ptr) -> void); + ifn!("llvm.va_copy", fn(ptr, ptr) -> void); if self.sess().instrument_coverage() { - ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void); + ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void); } - ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1); - ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1}); + ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1); + ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1}); if self.sess().opts.debuginfo != DebugInfo::None { ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void); ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void); } - ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p); + ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr); None } @@ -940,12 +928,10 @@ impl<'ll> CodegenCx<'ll, '_> { let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() { Some(def_id) => self.get_static(def_id), _ => { - let ty = self - .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false); self.declare_global("rust_eh_catch_typeinfo", ty) } }; - let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p()); self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo)); eh_catch_typeinfo } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 37f30917609ae..e4eed0a89feb2 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -17,8 +17,7 @@ use rustc_span::symbol::sym; /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. 
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { - let gdb_debug_scripts_section = - bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p()); + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 6df1b708ccd45..55f2808864077 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -167,7 +167,6 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let ptr = args[0].immediate(); let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let llty = ty.llvm_type(self); - let ptr = self.pointercast(ptr, self.type_ptr_to(llty)); self.volatile_load(llty, ptr) } else { self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr) @@ -317,18 +316,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.const_bool(true) } else if use_integer_compare { let integer_ty = self.type_ix(layout.size().bits()); - let ptr_ty = self.type_ptr_to(integer_ty); - let a_ptr = self.bitcast(a, ptr_ty); - let a_val = self.load(integer_ty, a_ptr, layout.align().abi); - let b_ptr = self.bitcast(b, ptr_ty); - let b_val = self.load(integer_ty, b_ptr, layout.align().abi); + let a_val = self.load(integer_ty, a, layout.align().abi); + let b_val = self.load(integer_ty, b, layout.align().abi); self.icmp(IntPredicate::IntEQ, a_val, b_val) } else { - let i8p_ty = self.type_i8p(); - let a_ptr = self.bitcast(a, i8p_ty); - let b_ptr = self.bitcast(b, i8p_ty); let n = self.const_usize(layout.size().bytes()); - let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]); + let cmp = self.call_intrinsic("memcmp", &[a, b, n]); match self.cx.sess().target.arch.as_ref() { "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)), _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)), @@ -383,10 +376,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { - let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); - let ptr = self.pointercast(result.llval, ptr_llty); - self.store(llval, ptr, result.align); + if let PassMode::Cast(_, _) = &fn_abi.ret.mode { + self.store(llval, result.llval, result.align); } else { OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val @@ -410,9 +401,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value { // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time // optimization pass replaces calls to this intrinsic with code to test type membership. 
- let i8p_ty = self.type_i8p(); - let bitcast = self.bitcast(pointer, i8p_ty); - self.call_intrinsic("llvm.type.test", &[bitcast, typeid]) + self.call_intrinsic("llvm.type.test", &[pointer, typeid]) } fn type_checked_load( @@ -444,7 +433,7 @@ fn try_intrinsic<'ll>( dest: &'ll Value, ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.call(try_func_ty, None, None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. @@ -544,8 +533,8 @@ fn codegen_msvc_try<'ll>( // // More information can be found in libstd's seh.rs implementation. let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(bx.type_i8p(), ptr_align); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let slot = bx.alloca(bx.type_ptr(), ptr_align); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); @@ -568,10 +557,10 @@ fn codegen_msvc_try<'ll>( // // When modifying, make sure that the type_name string exactly matches // the one used in library/panic_unwind/src/seh.rs. - let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p()); + let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr()); let type_name = bx.const_bytes(b"rust_panic\0"); let type_info = - bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false); + bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false); let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); unsafe { llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); @@ -588,15 +577,15 @@ fn codegen_msvc_try<'ll>( bx.switch_to_block(catchpad_rust); let flags = bx.const_i32(8); let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); - let ptr = bx.load(bx.type_i8p(), slot, ptr_align); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let ptr = bx.load(bx.type_ptr(), slot, ptr_align); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); // The flag value of 64 indicates a "catch-all". 
bx.switch_to_block(catchpad_foreign); let flags = bx.const_i32(64); - let null = bx.const_null(bx.type_i8p()); + let null = bx.const_null(bx.type_ptr()); let funclet = bx.catch_pad(cs, &[null, flags, null]); bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet)); bx.catch_ret(&funclet, caught); @@ -655,7 +644,7 @@ fn codegen_wasm_try<'ll>( // ret i32 1 // } // - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); @@ -665,13 +654,13 @@ fn codegen_wasm_try<'ll>( let cs = bx.catch_switch(None, None, &[catchpad]); bx.switch_to_block(catchpad); - let null = bx.const_null(bx.type_i8p()); + let null = bx.const_null(bx.type_ptr()); let funclet = bx.catch_pad(cs, &[null]); let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]); let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); @@ -723,7 +712,7 @@ fn codegen_gnu_try<'ll>( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); @@ -736,12 +725,12 @@ fn codegen_gnu_try<'ll>( // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. bx.switch_to_block(catch); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1); - let tydesc = bx.const_null(bx.type_i8p()); + let tydesc = bx.const_null(bx.type_ptr()); bx.add_clause(vals, tydesc); let ptr = bx.extract_value(vals, 0); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, ptr], None); bx.ret(bx.const_i32(1)); }); @@ -787,7 +776,7 @@ fn codegen_emcc_try<'ll>( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); @@ -800,10 +789,10 @@ fn codegen_emcc_try<'ll>( // the landing pad clauses the exception's type had been matched to. 
bx.switch_to_block(catch); let tydesc = bx.eh_catch_typeinfo(); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2); bx.add_clause(vals, tydesc); - bx.add_clause(vals, bx.const_null(bx.type_i8p())); + bx.add_clause(vals, bx.const_null(bx.type_ptr())); let ptr = bx.extract_value(vals, 0); let selector = bx.extract_value(vals, 1); @@ -816,7 +805,7 @@ fn codegen_emcc_try<'ll>( // create an alloca and pass a pointer to that. let ptr_align = bx.tcx().data_layout.pointer_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi; - let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false); + let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false); let catch_data = bx.alloca(catch_data_type, ptr_align); let catch_data_0 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]); @@ -824,9 +813,8 @@ fn codegen_emcc_try<'ll>( let catch_data_1 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]); bx.store(is_rust_panic, catch_data_1, i8_align); - let catch_data = bx.bitcast(catch_data, bx.type_i8p()); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None); bx.ret(bx.const_i32(1)); }); @@ -967,8 +955,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let place = PlaceRef::alloca(bx, args[0].layout); args[0].val.store(bx, place); let int_ty = bx.type_ix(expected_bytes * 8); - let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty)); - bx.load(int_ty, ptr, Align::ONE) + bx.load(int_ty, place.llval, Align::ONE) } _ => return_error!(InvalidMonomorphization::InvalidBitmask { span, @@ -1217,7 +1204,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); - let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); return Ok(bx.load(array_ty, ptr, Align::ONE)); } _ => return_error!(InvalidMonomorphization::CannotReturn { @@ -1352,7 +1338,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64, - mut no_pointers: usize, + no_pointers: usize, ) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? 
let mut elem_ty = match *elem_ty.kind() { @@ -1361,9 +1347,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>( ty::Float(v) => cx.type_float_from_ty(v), _ => unreachable!(), }; - while no_pointers > 0 { - elem_ty = cx.type_ptr_to(elem_ty); - no_pointers -= 1; + if no_pointers > 0 { + elem_ty = cx.type_ptr(); } cx.type_vector(elem_ty, vec_len) } @@ -1857,11 +1842,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } } - if in_elem == out_elem { - return Ok(args[0].immediate()); - } else { - return Ok(bx.pointercast(args[0].immediate(), llret_ty)); - } + return Ok(args[0].immediate()); } if name == sym::simd_expose_addr { diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 5fe6407ac4fe8..7407cfa8c5b2f 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1073,7 +1073,7 @@ extern "C" { // Operations on array, pointer, and vector types (sequence types) pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; - pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type; + pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type; pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; pub fn LLVMGetElementType(Ty: &Type) -> &Type; diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 7e672a8dc3362..ef25af59db729 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -69,6 +69,14 @@ impl<'ll> CodegenCx<'ll, '_> { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } + pub(crate) fn type_ptr(&self) -> &'ll Type { + self.type_ptr_ext(AddressSpace::DATA) + } + + pub(crate) fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { + unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } + } + pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { let n_args = llvm::LLVMCountParamTypes(ty) as usize; @@ -112,12 +120,6 @@ impl<'ll> CodegenCx<'ll, '_> { } } - pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = Integer::approximate_align(self, align); - self.type_from_integer(ity) - } - /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. 
pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type { @@ -189,17 +191,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } } - fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { - assert_ne!( - self.type_kind(ty), - TypeKind::Function, - "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense" - ); - ty.ptr_to(AddressSpace::DATA) + fn type_ptr_to(&self, _ty: &'ll Type) -> &'ll Type { + self.type_ptr() } - fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type { - ty.ptr_to(address_space) + fn type_ptr_to_ext(&self, _ty: &'ll Type, address_space: AddressSpace) -> &'ll Type { + self.type_ptr_ext(address_space) } fn element_type(&self, ty: &'ll Type) -> &'ll Type { @@ -247,12 +244,8 @@ impl Type { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } } - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA) - } - - fn ptr_to(&self, address_space: AddressSpace) -> &Type { - unsafe { llvm::LLVMPointerType(self, address_space.0) } + pub fn ptr_llcx(llcx: &llvm::Context) -> &Type { + unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) } } } diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 2dbd467cc84c3..ac18fdfe07fb0 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>( match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), Abi::Vector { element, count } => { - let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); + let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } Abi::ScalarPair(..) => { @@ -179,12 +179,7 @@ pub trait LayoutLlvmExt<'tcx> { fn is_llvm_scalar_pair(&self) -> bool; fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; - fn scalar_llvm_type_at<'a>( - &self, - cx: &CodegenCx<'a, 'tcx>, - scalar: Scalar, - offset: Size, - ) -> &'a Type; + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type; fn scalar_pair_element_llvm_type<'a>( &self, cx: &CodegenCx<'a, 'tcx>, @@ -230,16 +225,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { return llty; } let llty = match *self.ty.kind() { - ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) - } - ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) - } + ty::Ref(..) | ty::RawPtr(ty::TypeAndMut { .. 
}) => cx.type_ptr(), + ty::Adt(def, _) if def.is_box() => cx.type_ptr(), ty::FnPtr(sig) => { cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty())) } - _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO), + _ => self.scalar_llvm_type_at(cx, scalar), }; cx.scalar_lltypes.borrow_mut().insert(self.ty, llty); return llty; @@ -300,25 +291,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { self.llvm_type(cx) } - fn scalar_llvm_type_at<'a>( - &self, - cx: &CodegenCx<'a, 'tcx>, - scalar: Scalar, - offset: Size, - ) -> &'a Type { + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type { match scalar.primitive() { Int(i, _) => cx.type_from_integer(i), F32 => cx.type_f32(), F64 => cx.type_f64(), - Pointer(address_space) => { - // If we know the alignment, pick something better than i8. - let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - cx.type_pointee_for_align(pointee.align) - } else { - cx.type_i8() - }; - cx.type_ptr_to_ext(pointee, address_space) - } + Pointer(address_space) => cx.type_ptr_ext(address_space), } } @@ -364,8 +342,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { return cx.type_i1(); } - let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) }; - self.scalar_llvm_type_at(cx, scalar, offset) + self.scalar_llvm_type_at(cx, scalar) } fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 { diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 8800caa71d6c7..172c66a7af13c 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -5,7 +5,7 @@ use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::{ common::IntPredicate, - traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}, + traits::{BaseTypeMethods, BuilderMethods, ConstMethods}, }; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::Ty; @@ -26,24 +26,18 @@ fn round_pointer_up_to_alignment<'ll>( fn emit_direct_ptr_va_arg<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, list: OperandRef<'tcx, &'ll Value>, - llty: &'ll Type, size: Size, align: Align, slot_size: Align, allow_higher_align: bool, ) -> (&'ll Value, Align) { - let va_list_ty = bx.type_i8p(); - let va_list_ptr_ty = bx.type_ptr_to(va_list_ty); - let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty { - bx.bitcast(list.immediate(), va_list_ptr_ty) - } else { - list.immediate() - }; + let va_list_ty = bx.type_ptr(); + let va_list_addr = list.immediate(); let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi); let (addr, addr_align) = if allow_higher_align && align > slot_size { - (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align) + (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align) } else { (ptr, slot_size) }; @@ -56,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]); - (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) + (adjusted, addr_align) } else { - (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) + (addr, addr_align) } } @@ -81,7 +75,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>( (layout.llvm_type(bx.cx), layout.size, 
layout.align) }; let (addr, addr_align) = - emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align); + emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align); if indirect { let tmp_ret = bx.load(llty, addr, addr_align); bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi) @@ -146,7 +140,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( bx.cond_br(use_stack, on_stack, in_reg); bx.switch_to_block(in_reg); - let top_type = bx.type_i8p(); + let top_type = bx.type_ptr(); let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index); let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi); @@ -158,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]); } let reg_type = layout.llvm_type(bx); - let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type)); let reg_value = bx.load(reg_type, reg_addr, layout.align.abi); bx.br(end); @@ -218,7 +211,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( // Work out the address of the value in the register save area. let reg_ptr = bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3)); - let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi); + let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi); let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8)); let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding)); let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]); @@ -234,7 +227,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( // Work out the address of the value in the argument overflow area. let arg_ptr = bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2)); - let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi); + let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi); let arg_off = bx.const_u64(padding); let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]); @@ -246,14 +239,12 @@ fn emit_s390x_va_arg<'ll, 'tcx>( // Return the appropriate result. bx.switch_to_block(end); - let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]); + let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]); let val_type = layout.llvm_type(bx); let val_addr = if indirect { - let ptr_type = bx.cx.type_ptr_to(val_type); - let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type)); - bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi) + bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi) } else { - bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type)) + val_addr }; bx.load(val_type, val_addr, layout.align.abi) } diff --git a/tests/ui/dyn-star/llvm-old-style-ptrs.rs b/tests/ui/dyn-star/llvm-old-style-ptrs.rs deleted file mode 100644 index 460af99f9c508..0000000000000 --- a/tests/ui/dyn-star/llvm-old-style-ptrs.rs +++ /dev/null @@ -1,23 +0,0 @@ -// run-pass -// compile-flags: -Copt-level=0 -Cllvm-args=-opaque-pointers=0 - -// (the ability to disable opaque pointers has been removed in LLVM 17) -// ignore-llvm-version: 17 - 99 - -// This test can be removed once non-opaque pointers are gone from LLVM, maybe. 
- -#![feature(dyn_star, pointer_like_trait)] -#![allow(incomplete_features)] - -use std::fmt::Debug; -use std::marker::PointerLike; - -fn make_dyn_star<'a>(t: impl PointerLike + Debug + 'a) -> dyn* Debug + 'a { - t as _ -} - -fn main() { - println!("{:?}", make_dyn_star(Box::new(1i32))); - println!("{:?}", make_dyn_star(2usize)); - println!("{:?}", make_dyn_star((3usize,))); -} diff --git a/tests/ui/limits/issue-17913.stderr b/tests/ui/limits/issue-17913.stderr index 684db53a91909..0d21a42883ebb 100644 --- a/tests/ui/limits/issue-17913.stderr +++ b/tests/ui/limits/issue-17913.stderr @@ -1,4 +1,5 @@ error: values of the type `[&usize; usize::MAX]` are too big for the current architecture + --> $SRC_DIR/alloc/src/boxed.rs:LL:COL error: aborting due to previous error From 04303cfb3af3a8019ef913b1a07a94cfb683a5ab Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Fri, 28 Jul 2023 20:24:33 -0400 Subject: [PATCH 2/5] cg_ssa: remove pointee types and pointercast/bitcast-of-ptr --- compiler/rustc_codegen_gcc/src/builder.rs | 1 - compiler/rustc_codegen_gcc/src/common.rs | 8 +-- .../rustc_codegen_gcc/src/intrinsic/mod.rs | 2 +- compiler/rustc_codegen_gcc/src/type_.rs | 26 ++++++-- compiler/rustc_codegen_llvm/src/common.rs | 6 +- compiler/rustc_codegen_llvm/src/consts.rs | 4 -- compiler/rustc_codegen_llvm/src/type_.rs | 16 ++--- compiler/rustc_codegen_ssa/src/base.rs | 62 ++++--------------- compiler/rustc_codegen_ssa/src/meth.rs | 4 +- compiler/rustc_codegen_ssa/src/mir/block.rs | 12 ++-- .../rustc_codegen_ssa/src/mir/intrinsic.rs | 24 +++---- compiler/rustc_codegen_ssa/src/mir/operand.rs | 33 ++-------- compiler/rustc_codegen_ssa/src/mir/place.rs | 39 ++---------- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 26 ++------ .../rustc_codegen_ssa/src/traits/consts.rs | 1 - .../rustc_codegen_ssa/src/traits/type_.rs | 12 +--- 16 files changed, 75 insertions(+), 201 deletions(-) diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 43d0aafbd50bf..0b1f2fe6a87d9 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -27,7 +27,6 @@ use rustc_codegen_ssa::traits::{ BaseTypeMethods, BuilderMethods, ConstMethods, - DerivedTypeMethods, LayoutTypeMethods, HasCodegen, OverflowOp, diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index b62f4676f70b6..5f54cb16d8e2b 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -16,6 +16,10 @@ use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { + self.context.new_cast(None, val, ty) + } + pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> { bytes_in_context(self, bytes) } @@ -242,10 +246,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { const_alloc_to_gcc(self, alloc) } - fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { - self.context.new_cast(None, val, ty) - } - fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { if value.get_type() == self.bool_type.make_pointer() { if let Some(pointee) = typ.get_pointee() { diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index b75546447e321..68edde1382941 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ 
b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -12,7 +12,7 @@ use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; #[cfg(feature="master")] -use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods}; +use rustc_codegen_ssa::traits::MiscMethods; use rustc_codegen_ssa::errors::InvalidMonomorphization; use rustc_middle::bug; use rustc_middle::ty::{self, Instance, Ty}; diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs index 521b64ad34d15..31899740514ae 100644 --- a/compiler/rustc_codegen_gcc/src/type_.rs +++ b/compiler/rustc_codegen_gcc/src/type_.rs @@ -54,6 +54,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.u128_type } + pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { + ty.make_pointer() + } + + pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { + // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? + ty.make_pointer() + } + + pub fn type_i8p(&self) -> Type<'gcc> { + self.type_ptr_to(self.type_i8()) + } + + pub fn type_i8p_ext(&self, address_space: AddressSpace) -> Type<'gcc> { + self.type_ptr_to_ext(self.type_i8(), address_space) + } + pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> { // FIXME(eddyb) We could find a better approximation if ity.align < align. let ity = Integer::approximate_align(self, align); @@ -149,13 +166,12 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } - fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { - ty.make_pointer() + fn type_ptr(&self) -> Type<'gcc> { + self.type_ptr_to(self.type_void()) } - fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { - // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? - ty.make_pointer() + fn type_ptr_ext(&self, address_space: AddressSpace) -> Type<'gcc> { + self.type_ptr_to_ext(self.type_void(), address_space) } fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> { diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index d5dc3abf31ed7..620526481b25b 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -1,6 +1,6 @@ //! Code that is useful in various codegen modules. 
-use crate::consts::{self, const_alloc_to_llvm}; +use crate::consts::const_alloc_to_llvm; pub use crate::context::CodegenCx; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True}; use crate::type_::Type; @@ -304,10 +304,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { const_alloc_to_llvm(self, alloc) } - fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { - consts::ptrcast(val, ty) - } - fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { self.const_bitcast(val, ty) } diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 4ccf54cd96d8e..5d3656f1ef243 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -193,10 +193,6 @@ fn check_and_apply_linkage<'ll, 'tcx>( } } -pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { llvm::LLVMConstPointerCast(val, ty) } -} - impl<'ll> CodegenCx<'ll, '_> { pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstBitCast(val, ty) } diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index ef25af59db729..8db6195d931f0 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -69,14 +69,6 @@ impl<'ll> CodegenCx<'ll, '_> { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } - pub(crate) fn type_ptr(&self) -> &'ll Type { - self.type_ptr_ext(AddressSpace::DATA) - } - - pub(crate) fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { - unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } - } - pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { let n_args = llvm::LLVMCountParamTypes(ty) as usize; @@ -191,12 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } } - fn type_ptr_to(&self, _ty: &'ll Type) -> &'ll Type { - self.type_ptr() + fn type_ptr(&self) -> &'ll Type { + self.type_ptr_ext(AddressSpace::DATA) } - fn type_ptr_to_ext(&self, _ty: &'ll Type, address_space: AddressSpace) -> &'ll Type { - self.type_ptr_ext(address_space) + fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { + unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } } fn element_type(&self, ty: &'ll Type) -> &'ll Type { diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 79c284ecfbfa0..f4d309c6e9f1c 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -165,50 +165,27 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target)); if let Some(entry_idx) = vptr_entry_idx { - let ptr_ty = cx.type_i8p(); + let ptr_ty = cx.type_ptr(); let ptr_align = cx.tcx().data_layout.pointer_align.abi; - let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind); - let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty)); let gep = bx.inbounds_gep( ptr_ty, - llvtable, + old_info, &[bx.const_usize(u64::try_from(entry_idx).unwrap())], ); let new_vptr = bx.load(ptr_ty, gep, ptr_align); bx.nonnull_metadata(new_vptr); // VTable loads are invariant. 
bx.set_invariant_load(new_vptr); - bx.pointercast(new_vptr, vtable_ptr_ty) + new_vptr } else { old_info } } - (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => { - let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind); - cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty) - } + (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()), _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target), } } -// Returns the vtable pointer type of a `dyn` or `dyn*` type -fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>( - cx: &Cx, - target: Ty<'tcx>, - kind: ty::DynKind, -) -> <Cx as BackendTypes>::Type { - cx.scalar_pair_element_backend_type( - cx.layout_of(match kind { - // vtable is the second field of `*mut dyn Trait` - ty::Dyn => Ty::new_mut_ptr(cx.tcx(), target), - // vtable is the second field of `dyn* Trait` - ty::DynStar => target, - }), - 1, - true, - ) -} - /// Coerces `src` to `dst_ty`. `src_ty` must be a pointer. pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, @@ -222,8 +199,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { assert_eq!(bx.cx().type_is_sized(a), old_info.is_none()); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); - (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info)) + (src, unsized_info(bx, a, b, old_info)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { assert_eq!(def_a, def_b); @@ -248,11 +224,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( assert_eq!(result, None); result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info)); } - let (lldata, llextra) = result.unwrap(); - let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true); - let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty)) + result.unwrap() } _ => bug!("unsize_ptr: called on bad types"), } @@ -271,11 +243,9 @@ pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)), "destination type must be a dyn*" ); - // FIXME(dyn-star): We can remove this when all supported LLVMs use opaque ptrs only. - let unit_ptr = bx.cx().type_ptr_to(bx.cx().type_struct(&[], false)); let src = match bx.cx().type_kind(bx.cx().backend_type(src_ty_and_layout)) { - TypeKind::Pointer => bx.pointercast(src, unit_ptr), - TypeKind::Integer => bx.inttoptr(src, unit_ptr), + TypeKind::Pointer => src, + TypeKind::Integer => bx.inttoptr(src, bx.type_ptr()), // FIXME(dyn-star): We probably have to do a bitcast first, then inttoptr. 
        kind => bug!("unexpected TypeKind for left-hand side of `dyn*` cast: {kind:?}"),
     };
@@ -398,11 +368,6 @@ pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     if flags == MemFlags::empty()
         && let Some(bty) = bx.cx().scalar_copy_backend_type(layout)
     {
-        // I look forward to only supporting opaque pointers
-        let pty = bx.type_ptr_to(bty);
-        let src = bx.pointercast(src, pty);
-        let dst = bx.pointercast(dst, pty);
-
         let temp = bx.load(bty, src, src_align);
         bx.store(temp, dst, dst_align);
     } else {
@@ -456,7 +421,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
     // depending on whether the target needs `argc` and `argv` to be passed in.
     let llfty = if cx.sess().target.main_needs_argc_argv {
-        cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+        cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
     } else {
         cx.type_func(&[], cx.type_int())
     };
@@ -490,7 +455,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         bx.insert_reference_to_gdb_debug_scripts_section_global();

         let isize_ty = cx.type_isize();
-        let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+        let ptr_ty = cx.type_ptr();
         let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

         let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
@@ -509,12 +474,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let i8_ty = cx.type_i8();
             let arg_sigpipe = bx.const_u8(sigpipe);

-            let start_ty =
-                cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+            let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, ptr_ty, i8_ty], isize_ty);
             (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
         } else {
             debug!("using user-defined start fn");
-            let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+            let start_ty = cx.type_func(&[isize_ty, ptr_ty], isize_ty);
             (rust_main, start_ty, vec![arg_argc, arg_argv])
         };
@@ -541,7 +505,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     } else {
         // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
         let arg_argc = bx.const_int(cx.type_int(), 0);
-        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+        let arg_argv = bx.const_null(cx.type_ptr());
         (arg_argc, arg_argv)
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index 9abe7b25d0e93..12146a54d3b92 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -23,7 +23,6 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
debug!("get_fn({llvtable:?}, {ty:?}, {self:?})"); let llty = bx.fn_ptr_backend_type(fn_abi); - let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty)); if bx.cx().sess().opts.unstable_opts.virtual_function_elimination && bx.cx().sess().lto() == Lto::Fat @@ -33,7 +32,7 @@ impl<'a, 'tcx> VirtualIndex { .unwrap(); let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes(); let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid); - bx.pointercast(func, llty) + func } else { let ptr_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]); @@ -54,7 +53,6 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_int({:?}, {:?})", llvtable, self); let llty = bx.type_isize(); - let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty)); let usize_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]); let ptr = bx.load(llty, gep, usize_align); diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index ed608bdbe9a8a..7a3982b5d4cc8 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -439,8 +439,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"), }; let ty = bx.cast_backend_type(cast_ty); - let addr = bx.pointercast(llslot, bx.type_ptr_to(ty)); - bx.load(ty, addr, self.fn_abi.ret.layout.align.abi) + bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi) } }; bx.ret(llval); @@ -853,9 +852,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Some(intrinsic) => { let dest = match ret_dest { _ if fn_abi.ret.is_indirect() => llargs[0], - ReturnDest::Nothing => { - bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret))) - } + ReturnDest::Nothing => bx.const_undef(bx.type_ptr()), ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, ReturnDest::DirectOperand(_) => { bug!("Cannot use direct operand with an intrinsic call") @@ -1428,8 +1425,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty, _) = &arg.mode { let llty = bx.cast_backend_type(ty); - let addr = bx.pointercast(llval, bx.type_ptr_to(llty)); - llval = bx.load(llty, addr, align.min(arg.layout.align.abi)); + llval = bx.load(llty, llval, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -1634,7 +1630,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // represents that this is a catch-all block. bx = Bx::build(self.cx, cp_llbb); let null = - bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space)); + bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space)); let sixty_four = bx.const_i32(64); funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null])); } else { diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 18201abd63180..8821fb21fd002 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -270,7 +270,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::const_allocate => { // returns a null pointer at runtime. 
-                bx.const_null(bx.type_i8p())
+                bx.const_null(bx.type_ptr())
             }

             sym::const_deallocate => {
@@ -310,14 +310,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ty = fn_args.type_at(0);
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let weak = instruction == "cxchgweak";
-                    let mut dst = args[0].immediate();
+                    let dst = args[0].immediate();
                     let mut cmp = args[1].immediate();
                     let mut src = args[2].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first.
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
-                        dst = bx.pointercast(dst, ptr_llty);
                         cmp = bx.ptrtoint(cmp, bx.type_isize());
                         src = bx.ptrtoint(src, bx.type_isize());
                     }
@@ -342,13 +340,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let layout = bx.layout_of(ty);
                     let size = layout.size;
-                    let mut source = args[0].immediate();
+                    let source = args[0].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first...
                         let llty = bx.type_isize();
-                        let ptr_llty = bx.type_ptr_to(llty);
-                        source = bx.pointercast(source, ptr_llty);
                         let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
                         // ... and then cast the result back to a pointer
                         bx.inttoptr(result, bx.backend_type(layout))
@@ -365,12 +361,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let size = bx.layout_of(ty).size;
                     let mut val = args[1].immediate();
-                    let mut ptr = args[0].immediate();
+                    let ptr = args[0].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first.
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
-                        ptr = bx.pointercast(ptr, ptr_llty);
                         val = bx.ptrtoint(val, bx.type_isize());
                     }
                     bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
@@ -409,13 +403,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ty = fn_args.type_at(0);
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
-                    let mut ptr = args[0].immediate();
+                    let ptr = args[0].immediate();
                     let mut val = args[1].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first.
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
-                        ptr = bx.pointercast(ptr, ptr_llty);
                         val = bx.ptrtoint(val, bx.type_isize());
                     }
                     bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
@@ -470,10 +462,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };

         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
-                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
-                let ptr = bx.pointercast(result.llval, ptr_llty);
-                bx.store(llval, ptr, result.align);
+            if let PassMode::Cast(..) = &fn_abi.ret.mode {
+                bx.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                     .val
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 31c293d7c298e..f90d1a0fc9cda 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -2,7 +2,6 @@
 use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};
 use crate::base;
-use crate::common::TypeKind;
 use crate::glue;
 use crate::traits::*;
 use crate::MemFlags;
@@ -132,7 +131,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
     ) -> Self {
         let alloc_align = alloc.inner().align;
         assert_eq!(alloc_align, layout.align.abi);
-        let ty = bx.type_ptr_to(bx.cx().backend_type(layout));

         let read_scalar = |start, size, s: abi::Scalar, ty| {
             let val = alloc
@@ -156,7 +154,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                let val = read_scalar(Size::ZERO, size, s, ty);
+                let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
             Abi::ScalarPair(
@@ -187,7 +185,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 let base_addr = bx.static_addr_of(init, alloc_align, None);

                 let llval = bx.const_ptr_byte_offset(base_addr, offset);
-                let llval = bx.const_bitcast(llval, ty);
                 bx.load_operand(PlaceRef::new_sized(llval, layout))
             }
         }
@@ -314,38 +311,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 ) => {
                     // Bools in union fields need to be truncated.
                     *llval = bx.to_immediate(*llval, field);
-                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                    let ty = bx.cx().immediate_backend_type(field);
-                    if bx.type_kind(ty) == TypeKind::Pointer {
-                        *llval = bx.pointercast(*llval, ty);
-                    }
                 }
                 (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
                     // Bools in union fields need to be truncated.
                     *a = bx.to_immediate_scalar(*a, a_abi);
                     *b = bx.to_immediate_scalar(*b, b_abi);
-                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                    let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
-                    let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
-                    if bx.type_kind(a_ty) == TypeKind::Pointer {
-                        *a = bx.pointercast(*a, a_ty);
-                    }
-                    if bx.type_kind(b_ty) == TypeKind::Pointer {
-                        *b = bx.pointercast(*b, b_ty);
-                    }
                 }
                 // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
                 (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
                     assert!(matches!(self.layout.abi, Abi::Vector { .. }));
-                    let llty = bx.cx().backend_type(self.layout);
                     let llfield_ty = bx.cx().backend_type(field);
                     // Can't bitcast an aggregate, so round trip through memory.
-                    let lltemp = bx.alloca(llfield_ty, field.align.abi);
-                    let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+                    let llptr = bx.alloca(llfield_ty, field.align.abi);
                     bx.store(*llval, llptr, field.align.abi);
-                    *llval = bx.load(llfield_ty, lltemp, field.align.abi);
+                    *llval = bx.load(llfield_ty, llptr, field.align.abi);
                 }
                 (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
                     bug!()
@@ -380,9 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
             OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
         } else {
-            let bty = bx.cx().backend_type(layout);
-            let ptr_bty = bx.cx().type_ptr_to(bty);
-            OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+            let ptr = bx.cx().type_ptr();
+            OperandValue::Ref(bx.const_poison(ptr), None, layout.align.abi)
         }
     }
@@ -434,8 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             if flags.contains(MemFlags::NONTEMPORAL) {
                 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
                 let ty = bx.backend_type(dest.layout);
-                let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
-                let val = bx.load(ty, ptr, source_align);
+                let val = bx.load(ty, r, source_align);
                 bx.store_with_flags(val, dest.llval, dest.align, flags);
                 return;
             }
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 90eab55f76e76..64c6d17469bc7 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -115,8 +115,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                 // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
-                let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-                bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+                bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) => {
                 // All fields of Scalar and ScalarPair layouts must have been handled by this point.
@@ -133,8 +132,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             }
         };
         PlaceRef {
-            // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
-            llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+            llval,
             llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
             layout: field,
             align: effective_field_align,
@@ -194,20 +192,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {

         debug!("struct_field_ptr: DST field offset: {:?}", offset);

-        // Cast and adjust pointer.
-        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+        // Adjust pointer.
+        let ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]);

-        // Finally, cast back to the type expected.
-        let ll_fty = bx.cx().backend_type(field);
-        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
-        PlaceRef {
-            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
-            llextra: self.llextra,
-            layout: field,
-            align: effective_field_align,
-        }
+        PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
     }

     /// Obtain the actual discriminant of a value.
@@ -416,11 +404,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let mut downcast = *self;
         downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
-
-        // Cast to the appropriate variant struct type.
-        let variant_ty = bx.cx().backend_type(downcast.layout);
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
         downcast
     }

@@ -431,11 +414,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let mut downcast = *self;
         downcast.layout = bx.cx().layout_of(ty);
-
-        // Cast to the appropriate type.
-        let variant_ty = bx.cx().backend_type(downcast.layout);
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
         downcast
     }

@@ -515,13 +493,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     ));
                 }

-                // Cast the place pointer type to the new
-                // array or slice type (`*[%_; new_len]`).
-                subslice.llval = bx.pointercast(
-                    subslice.llval,
-                    bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
-                );
-
                 subslice
             }
             mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 1f90a28eb8eef..6f02eb8411a61 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -182,9 +182,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                 // When we have immediate(s), the alignment of the source is irrelevant,
                 // so we can store them using the destination's alignment.
-                let llty = bx.backend_type(src.layout);
-                let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
-                src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+                src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
             }
         }
     }
@@ -222,9 +220,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Ref(ptr, meta, align) => {
                 debug_assert_eq!(meta, None);
                 debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-                let cast_bty = bx.backend_type(cast);
-                let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
-                let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
+                let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
                 Some(bx.load_operand(fake_place).val)
             }
             OperandValue::ZeroSized => {
@@ -480,18 +476,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 {
                     if let OperandValue::Pair(data_ptr, meta) = operand.val {
                         if bx.cx().is_backend_scalar_pair(cast) {
-                            let data_cast = bx.pointercast(
-                                data_ptr,
-                                bx.cx().scalar_pair_element_backend_type(cast, 0, true),
-                            );
-                            OperandValue::Pair(data_cast, meta)
+                            OperandValue::Pair(data_ptr, meta)
                         } else {
-                            // cast to thin-ptr
-                            // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
-                            // pointer-cast of that pointer to desired pointer type.
-                            let llcast_ty = bx.cx().immediate_backend_type(cast);
-                            let llval = bx.pointercast(data_ptr, llcast_ty);
-                            OperandValue::Immediate(llval)
+                            // Cast of fat-ptr to thin-ptr is an extraction of data-ptr.
+                            OperandValue::Immediate(data_ptr)
                         }
                     } else {
                         bug!("unexpected non-pair operand");
                     }
@@ -736,13 +724,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                 let operand = self.codegen_operand(bx, operand);
-                let lloperand = operand.immediate();
+                let val = operand.immediate();

                 let content_ty = self.monomorphize(content_ty);
                 let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
-                let llty_ptr = bx.cx().backend_type(box_layout);
-                let val = bx.pointercast(lloperand, llty_ptr);

                 OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
             }
         }
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index 822c19155e3b7..4dff9c7684f18 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -36,7 +36,6 @@ pub trait ConstMethods<'tcx>: BackendTypes {
     fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;

-    fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
     fn const_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
     fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value;
 }
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index e64417e1a4a69..dc3dbd9d81949 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -26,8 +26,8 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
     fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
     fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
     fn type_kind(&self, ty: Self::Type) -> TypeKind;
-    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
-    fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+    fn type_ptr(&self) -> Self::Type;
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
     fn element_type(&self, ty: Self::Type) -> Self::Type;

     /// Returns the number of elements in `self` if it is an LLVM vector type.
@@ -42,14 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
 }

 pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
-    fn type_i8p(&self) -> Self::Type {
-        self.type_i8p_ext(AddressSpace::DATA)
-    }
-
-    fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
-        self.type_ptr_to_ext(self.type_i8(), address_space)
-    }
-
     fn type_int(&self) -> Self::Type {
         match &self.sess().target.c_int_width[..] {
             "16" => self.type_i16(),

From def44c56693327144e13e05dba286197f618ca0f Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sat, 29 Jul 2023 16:31:53 -0400
Subject: [PATCH 3/5] cg_llvm: inline check_store

---
 compiler/rustc_codegen_llvm/src/builder.rs | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 1576cfeba42a0..bcbf55c6effe9 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -652,7 +652,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) -> &'ll Value {
         debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
-        let ptr = self.check_store(ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             let align =
@@ -682,7 +682,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: Size,
     ) {
         debug!("Store {:?} -> {:?}", val, ptr);
-        let ptr = self.check_store(ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMRustBuildAtomicStore(
                 self.llbuilder,
@@ -1382,14 +1382,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         ret.expect("LLVM does not have support for catchret")
     }

-    fn check_store(&mut self, ptr: &'ll Value) -> &'ll Value {
-        let dest_ptr_ty = self.cx.val_ty(ptr);
-
-        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
-        ptr
-    }
-
     fn check_call<'b>(
         &mut self,
         typ: &str,

From cf7788d54b1b91cd3e778984f8dceada224e28ad Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sat, 29 Jul 2023 16:32:03 -0400
Subject: [PATCH 4/5] cg_llvm: clean up match

---
 compiler/rustc_codegen_llvm/src/type_of.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index ac18fdfe07fb0..32e71c6a641ef 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -225,7 +225,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return llty;
         }
         let llty = match *self.ty.kind() {
-            ty::Ref(..) | ty::RawPtr(ty::TypeAndMut { .. }) => cx.type_ptr(),
+            ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(),
             ty::Adt(def, _) if def.is_box() => cx.type_ptr(),
             ty::FnPtr(sig) => {
                 cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))

From 55800123b73067ec98293f49ded6739036b0aca4 Mon Sep 17 00:00:00 2001
From: Erik Desjardins
Date: Sat, 29 Jul 2023 16:56:27 -0400
Subject: [PATCH 5/5] cg_llvm: simplify llvm.masked.gather/scatter naming with
 opaque pointers

With opaque pointers, there's no longer a need to generate a chain of
pointer types in the intrinsic name when arguments are pointers to
pointers.
---
 compiler/rustc_codegen_llvm/src/intrinsic.rs | 152 ++++++------------
 .../simd-intrinsic-generic-gather.rs         |   4 +-
 .../simd-intrinsic-generic-scatter.rs        |   4 +-
 3 files changed, 51 insertions(+), 109 deletions(-)

diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 55f2808864077..b62aa9cb3083d 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1307,49 +1307,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     // FIXME: use:
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
-    fn llvm_vector_str(
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        no_pointers: usize,
-        bx: &Builder<'_, '_, '_>,
-    ) -> String {
-        let p0s: String = "p0".repeat(no_pointers);
+    fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
         match *elem_ty.kind() {
             ty::Int(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: IntTy::Isize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
             ty::Uint(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: UIntTy::Usize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
-            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+            ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
+            ty::RawPtr(_) => format!("v{}p0", vec_len),
             _ => unreachable!(),
         }
     }

-    fn llvm_vector_ty<'ll>(
-        cx: &CodegenCx<'ll, '_>,
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        no_pointers: usize,
-    ) -> &'ll Type {
-        // FIXME: use cx.layout_of(ty).llvm_type() ?
-        let mut elem_ty = match *elem_ty.kind() {
+    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
+        let elem_ty = match *elem_ty.kind() {
             ty::Int(v) => cx.type_int_from_ty(v),
             ty::Uint(v) => cx.type_uint_from_ty(v),
             ty::Float(v) => cx.type_float_from_ty(v),
+            ty::RawPtr(_) => cx.type_ptr(),
             _ => unreachable!(),
         };
-        if no_pointers > 0 {
-            elem_ty = cx.type_ptr();
-        }
         cx.type_vector(elem_ty, vec_len)
     }
@@ -1404,47 +1389,26 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
     );

-    // This counts how many pointers
-    fn ptr_count(t: Ty<'_>) -> usize {
-        match t.kind() {
-            ty::RawPtr(p) => 1 + ptr_count(p.ty),
-            _ => 0,
-        }
-    }
-
-    // Non-ptr type
-    fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-        match t.kind() {
-            ty::RawPtr(p) => non_ptr(p.ty),
-            _ => t,
-        }
-    }
-
     // The second argument must be a simd vector with an element type that's a pointer
     // to the element type of the first argument
     let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
     let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-    let (pointer_count, underlying_ty) = match element_ty1.kind() {
-        ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
-        _ => {
-            require!(
-                false,
-                InvalidMonomorphization::ExpectedElementType {
-                    span,
-                    name,
-                    expected_element: element_ty1,
-                    second_arg: arg_tys[1],
-                    in_elem,
-                    in_ty,
-                    mutability: ExpectedPointerMutability::Not,
-                }
-            );
-            unreachable!();
-        }
-    };
-    assert!(pointer_count > 0);
-    assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-    assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+    require!(
+        matches!(
+            element_ty1.kind(),
+            ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+        ),
+        InvalidMonomorphization::ExpectedElementType {
+            span,
+            name,
+            expected_element: element_ty1,
+            second_arg: arg_tys[1],
+            in_elem,
+            in_ty,
+            mutability: ExpectedPointerMutability::Not,
+        }
+    );

     // The element type of the third argument must be a signed integer type of any width:
     let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
@@ -1475,12 +1439,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     };

     // Type of the vector of pointers:
-    let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-    let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+    let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+    let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);

     // Type of the vector of elements:
-    let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-    let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+    let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+    let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);

     let llvm_intrinsic =
         format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
@@ -1544,50 +1508,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         }
     );

-    // This counts how many pointers
-    fn ptr_count(t: Ty<'_>) -> usize {
-        match t.kind() {
-            ty::RawPtr(p) => 1 + ptr_count(p.ty),
-            _ => 0,
-        }
-    }
-
-    // Non-ptr type
-    fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-        match t.kind() {
-            ty::RawPtr(p) => non_ptr(p.ty),
-            _ => t,
-        }
-    }
-
     // The second argument must be a simd vector with an element type that's a pointer
     // to the element type of the first argument
     let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
     let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
     let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-    let (pointer_count, underlying_ty) = match element_ty1.kind() {
-        ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => {
-            (ptr_count(element_ty1), non_ptr(element_ty1))
-        }
-        _ => {
-            require!(
-                false,
-                InvalidMonomorphization::ExpectedElementType {
-                    span,
-                    name,
-                    expected_element: element_ty1,
-                    second_arg: arg_tys[1],
-                    in_elem,
-                    in_ty,
-                    mutability: ExpectedPointerMutability::Mut,
-                }
-            );
-            unreachable!();
-        }
-    };
-    assert!(pointer_count > 0);
-    assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-    assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+    require!(
+        matches!(
+            element_ty1.kind(),
+            ty::RawPtr(p)
+                if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+        ),
+        InvalidMonomorphization::ExpectedElementType {
+            span,
+            name,
+            expected_element: element_ty1,
+            second_arg: arg_tys[1],
+            in_elem,
+            in_ty,
+            mutability: ExpectedPointerMutability::Mut,
+        }
+    );

     // The element type of the third argument must be a signed integer type of any width:
     match element_ty2.kind() {
@@ -1619,12 +1561,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     let ret_t = bx.type_void();

     // Type of the vector of pointers:
-    let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-    let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+    let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+    let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);

     // Type of the vector of elements:
-    let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-    let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+    let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+    let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);

     let llvm_intrinsic =
         format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs
index 7fe3ffd208608..0bb21019685e5 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs
@@ -23,7 +23,7 @@ extern "platform-intrinsic" {
 #[no_mangle]
 pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
                            values: Vec2<f32>) -> Vec2<f32> {
-    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.{{.+}}(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
+    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
     simd_gather(values, pointers, mask)
 }

@@ -31,6 +31,6 @@ pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
 #[no_mangle]
 pub unsafe fn gather_pf32x2(pointers: Vec2<*const *const f32>, mask: Vec2<i32>,
                             values: Vec2<*const f32>) -> Vec2<*const f32> {
-    // CHECK: call <2 x ptr> @llvm.masked.gather.{{.+}}(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x ptr> {{.*}})
+    // CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x ptr> {{.*}})
     simd_gather(values, pointers, mask)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs
index 5c917474e4577..51953560b4fde 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs
@@ -23,7 +23,7 @@ extern "platform-intrinsic" {
 #[no_mangle]
 pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
                             values: Vec2<f32>) {
-    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0{{.*}}(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
     simd_scatter(values, pointers, mask)
 }

@@ -32,6 +32,6 @@ pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
 #[no_mangle]
 pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2<i32>,
                              values: Vec2<*const f32>) {
-    // CHECK: call void @llvm.masked.scatter.v2p0{{.*}}.v2p0{{.*}}(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    // CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
     simd_scatter(values, pointers, mask)
 }