diff --git a/README.md b/README.md
index 6d6383351caf7..3690a9c93c528 100644
--- a/README.md
+++ b/README.md
@@ -1,27 +1,36 @@
-# The Rust Programming Language
-
-[![Rust Community](https://img.shields.io/badge/Rust_Community%20-Join_us-brightgreen?style=plastic&logo=rust)](https://www.rust-lang.org/community)
+
+
+
+[Website][Rust] | [Getting started] | [Learn] | [Documentation] | [Contributing]
+
This is the main source code repository for [Rust]. It contains the compiler,
standard library, and documentation.
[Rust]: https://www.rust-lang.org/
+[Getting Started]: https://www.rust-lang.org/learn/get-started
+[Learn]: https://www.rust-lang.org/learn
+[Documentation]: https://www.rust-lang.org/learn#learn-use
+[Contributing]: CONTRIBUTING.md
+
+## Why Rust?
-**Note: this README is for _users_ rather than _contributors_.**
-If you wish to _contribute_ to the compiler, you should read
-[CONTRIBUTING.md](CONTRIBUTING.md) instead.
+- **Performance:** Fast and memory-efficient, suitable for critical services, embedded devices, and easy integration with other languages.
-
-Table of Contents
+- **Reliability:** Our rich type system and ownership model ensure memory and thread safety, reducing bugs at compile-time.
-- [Quick Start](#quick-start)
-- [Installing from Source](#installing-from-source)
-- [Getting Help](#getting-help)
-- [Contributing](#contributing)
-- [License](#license)
-- [Trademark](#trademark)
+- **Productivity:** Comprehensive documentation, a compiler committed to providing great diagnostics, and advanced tooling including package manager and build tool ([Cargo]), auto-formatter ([rustfmt]), linter ([Clippy]) and editor support ([rust-analyzer]).
-
+[Cargo]: https://github.com/rust-lang/cargo
+[rustfmt]: https://github.com/rust-lang/rustfmt
+[Clippy]: https://github.com/rust-lang/rust-clippy
+[rust-analyzer]: https://github.com/rust-lang/rust-analyzer
## Quick Start
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index aba94f4d817d7..c10a6258d397e 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -1276,7 +1276,8 @@ impl Expr {
ExprKind::While(..) => ExprPrecedence::While,
ExprKind::ForLoop { .. } => ExprPrecedence::ForLoop,
ExprKind::Loop(..) => ExprPrecedence::Loop,
- ExprKind::Match(..) => ExprPrecedence::Match,
+ ExprKind::Match(_, _, MatchKind::Prefix) => ExprPrecedence::Match,
+ ExprKind::Match(_, _, MatchKind::Postfix) => ExprPrecedence::PostfixMatch,
ExprKind::Closure(..) => ExprPrecedence::Closure,
ExprKind::Block(..) => ExprPrecedence::Block,
ExprKind::TryBlock(..) => ExprPrecedence::TryBlock,
@@ -3341,7 +3342,7 @@ impl TryFrom for ForeignItemKind {
pub type ForeignItem = Item;
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index f49eb2f22c50b..5060bbec42169 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -1021,7 +1021,7 @@ where
}
// Some types are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index 239735456ad53..f3249f3e5a8b5 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -768,7 +768,7 @@ impl DelimSpacing {
}
// Some types are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
diff --git a/compiler/rustc_ast/src/util/parser.rs b/compiler/rustc_ast/src/util/parser.rs
index 13768c1201791..373c0ebcc5cba 100644
--- a/compiler/rustc_ast/src/util/parser.rs
+++ b/compiler/rustc_ast/src/util/parser.rs
@@ -281,6 +281,7 @@ pub enum ExprPrecedence {
ForLoop,
Loop,
Match,
+ PostfixMatch,
ConstBlock,
Block,
TryBlock,
@@ -334,7 +335,8 @@ impl ExprPrecedence {
| ExprPrecedence::InlineAsm
| ExprPrecedence::Mac
| ExprPrecedence::FormatArgs
- | ExprPrecedence::OffsetOf => PREC_POSTFIX,
+ | ExprPrecedence::OffsetOf
+ | ExprPrecedence::PostfixMatch => PREC_POSTFIX,
// Never need parens
ExprPrecedence::Array
@@ -390,7 +392,8 @@ pub fn contains_exterior_struct_lit(value: &ast::Expr) -> bool {
| ast::ExprKind::Cast(x, _)
| ast::ExprKind::Type(x, _)
| ast::ExprKind::Field(x, _)
- | ast::ExprKind::Index(x, _, _) => {
+ | ast::ExprKind::Index(x, _, _)
+ | ast::ExprKind::Match(x, _, ast::MatchKind::Postfix) => {
// &X { y: 1 }, X { y: 1 }.y
contains_exterior_struct_lit(x)
}
diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs
index a1164008d0daf..402044c7af9de 100644
--- a/compiler/rustc_ast_lowering/src/index.rs
+++ b/compiler/rustc_ast_lowering/src/index.rs
@@ -3,7 +3,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::{LocalDefId, LocalDefIdMap};
use rustc_hir::intravisit::Visitor;
use rustc_hir::*;
-use rustc_index::{Idx, IndexVec};
+use rustc_index::IndexVec;
use rustc_middle::span_bug;
use rustc_middle::ty::TyCtxt;
use rustc_span::{Span, DUMMY_SP};
@@ -31,7 +31,7 @@ pub(super) fn index_hir<'hir>(
bodies: &SortedMap>,
num_nodes: usize,
) -> (IndexVec>, LocalDefIdMap) {
- let zero_id = ItemLocalId::new(0);
+ let zero_id = ItemLocalId::ZERO;
let err_node = ParentedNode { parent: zero_id, node: Node::Err(item.span()) };
let mut nodes = IndexVec::from_elem_n(err_node, num_nodes);
// This node's parent should never be accessed: the owner's parent is computed by the
@@ -112,7 +112,9 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {
}
fn insert_nested(&mut self, item: LocalDefId) {
- self.parenting.insert(item, self.parent_node);
+ if self.parent_node.as_u32() != 0 {
+ self.parenting.insert(item, self.parent_node);
+ }
}
}
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index c9786328565ba..abfea6078f21c 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -11,7 +11,7 @@ use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
use rustc_hir::PredicateOrigin;
-use rustc_index::{Idx, IndexSlice, IndexVec};
+use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::span_bug;
use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
use rustc_span::edit_distance::find_best_match_for_name;
@@ -563,7 +563,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let kind =
this.lower_use_tree(use_tree, &prefix, id, vis_span, &mut ident, attrs);
if let Some(attrs) = attrs {
- this.attrs.insert(hir::ItemLocalId::new(0), attrs);
+ this.attrs.insert(hir::ItemLocalId::ZERO, attrs);
}
let item = hir::Item {
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 833b0e9b5679e..8cf347bfa966c 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -157,7 +157,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
attrs: SortedMap::default(),
children: Vec::default(),
current_hir_id_owner: hir::CRATE_OWNER_ID,
- item_local_id_counter: hir::ItemLocalId::new(0),
+ item_local_id_counter: hir::ItemLocalId::ZERO,
node_id_to_local_id: Default::default(),
trait_map: Default::default(),
@@ -583,7 +583,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// and the caller to refer to some of the subdefinitions' nodes' `LocalDefId`s.
// Always allocate the first `HirId` for the owner itself.
- let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::new(0));
+ let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::ZERO);
debug_assert_eq!(_old, None);
let item = f(self);
@@ -677,7 +677,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
v.insert(local_id);
self.item_local_id_counter.increment_by(1);
- assert_ne!(local_id, hir::ItemLocalId::new(0));
+ assert_ne!(local_id, hir::ItemLocalId::ZERO);
if let Some(def_id) = self.opt_local_def_id(ast_node_id) {
self.children.push((def_id, hir::MaybeOwner::NonOwner(hir_id)));
}
@@ -696,7 +696,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn next_id(&mut self) -> hir::HirId {
let owner = self.current_hir_id_owner;
let local_id = self.item_local_id_counter;
- assert_ne!(local_id, hir::ItemLocalId::new(0));
+ assert_ne!(local_id, hir::ItemLocalId::ZERO);
self.item_local_id_counter.increment_by(1);
hir::HirId { owner, local_id }
}
diff --git a/compiler/rustc_borrowck/src/borrow_set.rs b/compiler/rustc_borrowck/src/borrow_set.rs
index 6a683d129ded1..a38dd286be51b 100644
--- a/compiler/rustc_borrowck/src/borrow_set.rs
+++ b/compiler/rustc_borrowck/src/borrow_set.rs
@@ -159,7 +159,7 @@ impl<'tcx> BorrowSet<'tcx> {
}
pub(crate) fn indices(&self) -> impl Iterator- {
- BorrowIndex::from_usize(0)..BorrowIndex::from_usize(self.len())
+ BorrowIndex::ZERO..BorrowIndex::from_usize(self.len())
}
pub(crate) fn iter_enumerated(&self) -> impl Iterator
- )> {
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index 6405364c30c14..71b54a761a2be 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -2261,7 +2261,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
- CastKind::PointerExposeAddress => {
+ CastKind::PointerExposeProvenance => {
let ty_from = op.ty(body, tcx);
let cast_ty_from = CastTy::from_ty(ty_from);
let cast_ty_to = CastTy::from_ty(*ty);
@@ -2271,7 +2271,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
span_mirbug!(
self,
rvalue,
- "Invalid PointerExposeAddress cast {:?} -> {:?}",
+ "Invalid PointerExposeProvenance cast {:?} -> {:?}",
ty_from,
ty
)
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 249c16898ce6e..0aa2bae8f78b1 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -649,7 +649,7 @@ fn codegen_stmt<'tcx>(
| CastKind::IntToFloat
| CastKind::FnPtrToPtr
| CastKind::PtrToPtr
- | CastKind::PointerExposeAddress
+ | CastKind::PointerExposeProvenance
| CastKind::PointerWithExposedProvenance,
ref operand,
to_ty,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 1615dc5de697b..8df83c706a100 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -1393,7 +1393,7 @@ fn llvm_add_sub<'tcx>(
// c + carry -> c + first intermediate carry or borrow respectively
let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
- let c = int0.value_field(fx, FieldIdx::new(0));
+ let c = int0.value_field(fx, FieldIdx::ZERO);
let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
// c + carry -> c + second intermediate carry or borrow respectively
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 783ad5d1dd1fc..67f9d83106294 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -965,7 +965,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
}
- sym::simd_expose_addr | sym::simd_with_exposed_provenance | sym::simd_cast_ptr => {
+ sym::simd_expose_provenance | sym::simd_with_exposed_provenance | sym::simd_cast_ptr => {
intrinsic_args!(fx, args => (arg); intrinsic);
ret.write_cvalue_transmute(fx, arg);
}
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index 86ebf37d105f6..04e24320f9131 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -61,7 +61,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
if ty.is_dyn_star() {
let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
- let ptr = dyn_star.place_field(fx, FieldIdx::new(0)).to_ptr();
+ let ptr = dyn_star.place_field(fx, FieldIdx::ZERO).to_ptr();
let vtable =
dyn_star.place_field(fx, FieldIdx::new(1)).to_cvalue(fx).load_scalar(fx);
break 'block (ptr, vtable);
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index e5f5146fac8fb..d2828669d438f 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -16,13 +16,15 @@ pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_session::config;
pub use rustc_target::abi::call::*;
-use rustc_target::abi::{self, HasDataLayout, Int};
+use rustc_target::abi::{self, HasDataLayout, Int, Size};
pub use rustc_target::spec::abi::Abi;
use rustc_target::spec::SanitizerSet;
use libc::c_uint;
use smallvec::SmallVec;
+use std::cmp;
+
pub trait ArgAttributesExt {
fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
fn apply_attrs_to_callsite(
@@ -130,42 +132,36 @@ impl LlvmType for Reg {
impl LlvmType for CastTarget {
fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
let rest_ll_unit = self.rest.unit.llvm_type(cx);
- let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
- (0, 0)
+ let rest_count = if self.rest.total == Size::ZERO {
+ 0
} else {
- (
- self.rest.total.bytes() / self.rest.unit.size.bytes(),
- self.rest.total.bytes() % self.rest.unit.size.bytes(),
- )
+ assert_ne!(
+ self.rest.unit.size,
+ Size::ZERO,
+ "total size {:?} cannot be divided into units of zero size",
+ self.rest.total
+ );
+ if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 {
+ assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
+ }
+ self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())
};
+ // Simplify to a single unit or an array if there's no prefix.
+ // This produces the same layout, but using a simpler type.
if self.prefix.iter().all(|x| x.is_none()) {
- // Simplify to a single unit when there is no prefix and size <= unit size
- if self.rest.total <= self.rest.unit.size {
+ if rest_count == 1 {
return rest_ll_unit;
}
- // Simplify to array when all chunks are the same size and type
- if rem_bytes == 0 {
- return cx.type_array(rest_ll_unit, rest_count);
- }
- }
-
- // Create list of fields in the main structure
- let mut args: Vec<_> = self
- .prefix
- .iter()
- .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
- .chain((0..rest_count).map(|_| rest_ll_unit))
- .collect();
-
- // Append final integer
- if rem_bytes != 0 {
- // Only integers can be really split further.
- assert_eq!(self.rest.unit.kind, RegKind::Integer);
- args.push(cx.type_ix(rem_bytes * 8));
+ return cx.type_array(rest_ll_unit, rest_count);
}
+ // Generate a struct type with the prefix and the "rest" arguments.
+ let prefix_args =
+ self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)));
+ let rest_args = (0..rest_count).map(|_| rest_ll_unit);
+ let args: Vec<_> = prefix_args.chain(rest_args).collect();
cx.type_struct(&args, false)
}
}
@@ -215,47 +211,33 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
}
PassMode::Cast { cast, pad_i32: _ } => {
- // FIXME(eddyb): Figure out when the simpler Store is safe, clang
- // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
- let can_store_through_cast_ptr = false;
- if can_store_through_cast_ptr {
- bx.store(val, dst.llval, self.layout.align.abi);
- } else {
- // The actual return type is a struct, but the ABI
- // adaptation code has cast it into some scalar type. The
- // code that follows is the only reliable way I have
- // found to do a transform like i64 -> {i32,i32}.
- // Basically we dump the data onto the stack then memcpy it.
- //
- // Other approaches I tried:
- // - Casting rust ret pointer to the foreign type and using Store
- // is (a) unsafe if size of foreign type > size of rust type and
- // (b) runs afoul of strict aliasing rules, yielding invalid
- // assembly under -O (specifically, the store gets removed).
- // - Truncating foreign type to correct integral type and then
- // bitcasting to the struct type yields invalid cast errors.
-
- // We instead thus allocate some scratch space...
- let scratch_size = cast.size(bx);
- let scratch_align = cast.align(bx);
- let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
- bx.lifetime_start(llscratch, scratch_size);
-
- // ... where we first store the value...
- bx.store(val, llscratch, scratch_align);
-
- // ... and then memcpy it to the intended destination.
- bx.memcpy(
- dst.llval,
- self.layout.align.abi,
- llscratch,
- scratch_align,
- bx.const_usize(self.layout.size.bytes()),
- MemFlags::empty(),
- );
-
- bx.lifetime_end(llscratch, scratch_size);
- }
+ // The ABI mandates that the value is passed as a different struct representation.
+ // Spill and reload it from the stack to convert from the ABI representation to
+ // the Rust representation.
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ // Note that the ABI type may be either larger or smaller than the Rust type,
+ // due to the presence or absence of trailing padding. For example:
+ // - On some ABIs, the Rust layout { f64, f32, } may omit padding
+ // when passed by value, making it smaller.
+ // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
+ // when passed by value, making it larger.
+ let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
+ // Allocate some scratch space...
+ let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+ // ...store the value...
+ bx.store(val, llscratch, scratch_align);
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(copy_bytes),
+ MemFlags::empty(),
+ );
+ bx.lifetime_end(llscratch, scratch_size);
}
_ => {
OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index 3ef8538ced3a5..f58dd4066ad71 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -147,7 +147,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
for options in [
TypeIdOptions::GENERALIZE_POINTERS,
TypeIdOptions::NORMALIZE_INTEGERS,
- TypeIdOptions::NO_SELF_TYPE_ERASURE,
+ TypeIdOptions::ERASE_SELF_TYPE,
]
.into_iter()
.powerset()
@@ -173,7 +173,9 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
if self.tcx.sess.is_sanitizer_kcfi_enabled() {
// LLVM KCFI does not support multiple !kcfi_type attachments
- let mut options = TypeIdOptions::empty();
+ // Default to erasing the self type. If we need the concrete type, there will be a
+ // hint in the instance.
+ let mut options = TypeIdOptions::ERASE_SELF_TYPE;
if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
options.insert(TypeIdOptions::GENERALIZE_POINTERS);
}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index e4ec7974e9000..dc52dd156b7e7 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -2111,7 +2111,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
return Ok(args[0].immediate());
}
- if name == sym::simd_expose_addr {
+ if name == sym::simd_expose_provenance {
let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index d4123329f4481..1aa52a985ef7d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1505,9 +1505,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
- if let PassMode::Cast { cast: ty, .. } = &arg.mode {
- let llty = bx.cast_backend_type(ty);
- llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
+ if let PassMode::Cast { cast, pad_i32: _ } = &arg.mode {
+ // The ABI mandates that the value is passed as a different struct representation.
+ // Spill and reload it from the stack to convert from the Rust representation to
+ // the ABI representation.
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ // Note that the ABI type may be either larger or smaller than the Rust type,
+ // due to the presence or absence of trailing padding. For example:
+ // - On some ABIs, the Rust layout { f64, f32, } may omit padding
+ // when passed by value, making it smaller.
+ // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
+ // when passed by value, making it larger.
+ let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes());
+ // Allocate some scratch space...
+ let llscratch = bx.alloca(bx.cast_backend_type(cast), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+ // ...memcpy the value...
+ bx.memcpy(
+ llscratch,
+ scratch_align,
+ llval,
+ align,
+ bx.const_usize(copy_bytes),
+ MemFlags::empty(),
+ );
+ // ...and then load it with the ABI type.
+ let cast_ty = bx.cast_backend_type(cast);
+ llval = bx.load(cast_ty, llscratch, scratch_align);
+ bx.lifetime_end(llscratch, scratch_size);
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 6f7b98a262d55..4d746c89f1fc3 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -405,7 +405,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
let val = match *kind {
- mir::CastKind::PointerExposeAddress => {
+ mir::CastKind::PointerExposeProvenance => {
assert!(bx.cx().is_backend_immediate(cast));
let llptr = operand.immediate();
let llcast_ty = bx.cx().immediate_backend_type(cast);
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index e0d45f1fe1146..9447d18fe8c93 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -34,9 +34,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.unsize_into(src, cast_layout, dest)?;
}
- CastKind::PointerExposeAddress => {
+ CastKind::PointerExposeProvenance => {
let src = self.read_immediate(src)?;
- let res = self.pointer_expose_address_cast(&src, cast_layout)?;
+ let res = self.pointer_expose_provenance_cast(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
@@ -225,7 +225,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- pub fn pointer_expose_address_cast(
+ pub fn pointer_expose_provenance_cast(
&mut self,
src: &ImmTy<'tcx, M::Provenance>,
cast_to: TyAndLayout<'tcx>,
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 831787a92c869..842fb6d204c29 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -792,7 +792,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 1a2f1194f89a2..e32aea39fc597 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -1058,7 +1058,7 @@ where
}
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index b6dcc33414739..543996c86baca 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -544,7 +544,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Unsizing is implemented for CTFE.
}
- Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
+ Rvalue::Cast(CastKind::PointerExposeProvenance, _, _) => {
self.check_op(ops::RawPtrToIntCast);
}
Rvalue::Cast(CastKind::PointerWithExposedProvenance, _, _) => {
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index e1e98ebc1e9ae..a499e4b980fc3 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -1077,7 +1077,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
// FIXME: Add Checks for these
CastKind::PointerWithExposedProvenance
- | CastKind::PointerExposeAddress
+ | CastKind::PointerExposeProvenance
| CastKind::PointerCoercion(_) => {}
CastKind::IntToInt | CastKind::IntToFloat => {
let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index a45f1dd72a126..30e240cf85b84 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -72,7 +72,7 @@ fn dominators_impl(graph: &G) -> Inner {
IndexVec::with_capacity(graph.num_nodes());
let mut stack = vec![PreOrderFrame {
- pre_order_idx: PreorderIndex::new(0),
+ pre_order_idx: PreorderIndex::ZERO,
iter: graph.successors(graph.start_node()),
}];
let mut pre_order_to_real: IndexVec =
@@ -80,8 +80,8 @@ fn dominators_impl(graph: &G) -> Inner {
let mut real_to_pre_order: IndexVec> =
IndexVec::from_elem_n(None, graph.num_nodes());
pre_order_to_real.push(graph.start_node());
- parent.push(PreorderIndex::new(0)); // the parent of the root node is the root for now.
- real_to_pre_order[graph.start_node()] = Some(PreorderIndex::new(0));
+ parent.push(PreorderIndex::ZERO); // the parent of the root node is the root for now.
+ real_to_pre_order[graph.start_node()] = Some(PreorderIndex::ZERO);
let mut post_order_idx = 0;
// Traverse the graph, collecting a number of things:
@@ -111,7 +111,7 @@ fn dominators_impl(graph: &G) -> Inner {
let reachable_vertices = pre_order_to_real.len();
- let mut idom = IndexVec::from_elem_n(PreorderIndex::new(0), reachable_vertices);
+ let mut idom = IndexVec::from_elem_n(PreorderIndex::ZERO, reachable_vertices);
let mut semi = IndexVec::from_fn_n(std::convert::identity, reachable_vertices);
let mut label = semi.clone();
let mut bucket = IndexVec::from_elem_n(vec![], reachable_vertices);
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index 7b40954e735db..b4107bd4a2bad 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -102,9 +102,9 @@ pub type PResult<'a, T> = Result>;
rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
// `PResult` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PResult<'_, ()>, 16);
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PResult<'_, bool>, 16);
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, Encodable, Decodable)]
@@ -1951,6 +1951,39 @@ pub fn report_ambiguity_error<'a, G: EmissionGuarantee>(
}
}
+/// Grammatical tool for displaying messages to end users in a nice form.
+///
+/// Returns "an" if the given string (ignoring a leading backtick) starts with a vowel or `&`, and "a" otherwise.
+pub fn a_or_an(s: &str) -> &'static str {
+ let mut chars = s.chars();
+ let Some(mut first_alpha_char) = chars.next() else {
+ return "a";
+ };
+ if first_alpha_char == '`' {
+ let Some(next) = chars.next() else {
+ return "a";
+ };
+ first_alpha_char = next;
+ }
+ if ["a", "e", "i", "o", "u", "&"].contains(&&first_alpha_char.to_lowercase().to_string()[..]) {
+ "an"
+ } else {
+ "a"
+ }
+}
+
+/// Grammatical tool for displaying messages to end users in a nice form.
+///
+/// Takes a list ["a", "b", "c"] and outputs a display-friendly version: "a, b and c"
+pub fn display_list_with_comma_and(v: &[T]) -> String {
+ match v.len() {
+ 0 => "".to_string(),
+ 1 => v[0].to_string(),
+ 2 => format!("{} and {}", v[0], v[1]),
+ _ => format!("{}, {}", v[0], display_list_with_comma_and(&v[1..])),
+ }
+}
+
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum TerminalUrl {
No,
diff --git a/compiler/rustc_expand/src/mbe/macro_parser.rs b/compiler/rustc_expand/src/mbe/macro_parser.rs
index a31be05ccc4d2..9fff00ffeae17 100644
--- a/compiler/rustc_expand/src/mbe/macro_parser.rs
+++ b/compiler/rustc_expand/src/mbe/macro_parser.rs
@@ -266,7 +266,7 @@ struct MatcherPos {
}
// This type is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MatcherPos, 16);
impl MatcherPos {
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index a0f86565929b9..f21cd653f962b 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -846,9 +846,8 @@ pub struct OwnerNodes<'tcx> {
impl<'tcx> OwnerNodes<'tcx> {
pub fn node(&self) -> OwnerNode<'tcx> {
- use rustc_index::Idx;
// Indexing must ensure it is an OwnerNode.
- self.nodes[ItemLocalId::new(0)].node.as_owner().unwrap()
+ self.nodes[ItemLocalId::ZERO].node.as_owner().unwrap()
}
}
@@ -856,7 +855,7 @@ impl fmt::Debug for OwnerNodes<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OwnerNodes")
// Do not print all the pointers to all the nodes, as it would be unreadable.
- .field("node", &self.nodes[ItemLocalId::from_u32(0)])
+ .field("node", &self.nodes[ItemLocalId::ZERO])
.field(
"parents",
&self
@@ -3762,7 +3761,7 @@ impl<'hir> Node<'hir> {
}
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
mod size_asserts {
use super::*;
// tidy-alphabetical-start
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
index d339075c171d1..0341a482fa8c1 100644
--- a/compiler/rustc_hir/src/hir_id.rs
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -17,7 +17,7 @@ impl Debug for OwnerId {
impl From<OwnerId> for HirId {
fn from(owner: OwnerId) -> HirId {
- HirId { owner, local_id: ItemLocalId::from_u32(0) }
+ HirId { owner, local_id: ItemLocalId::ZERO }
}
}
@@ -110,7 +110,7 @@ impl HirId {
#[inline]
pub fn make_owner(owner: LocalDefId) -> Self {
- Self { owner: OwnerId { def_id: owner }, local_id: ItemLocalId::from_u32(0) }
+ Self { owner: OwnerId { def_id: owner }, local_id: ItemLocalId::ZERO }
}
pub fn index(self) -> (usize, usize) {
@@ -172,6 +172,6 @@ unsafe impl StableOrd for ItemLocalId {
/// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_ID`.
pub const CRATE_HIR_ID: HirId =
- HirId { owner: OwnerId { def_id: CRATE_DEF_ID }, local_id: ItemLocalId::from_u32(0) };
+ HirId { owner: OwnerId { def_id: CRATE_DEF_ID }, local_id: ItemLocalId::ZERO };
pub const CRATE_OWNER_ID: OwnerId = OwnerId { def_id: CRATE_DEF_ID };
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
index a880445a27c1b..739a708699239 100644
--- a/compiler/rustc_hir_analysis/src/check/check.rs
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -899,7 +899,7 @@ pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
struct_span_code_err!(tcx.dcx(), sp, E0075, "SIMD vector cannot be empty").emit();
return;
}
- let e = fields[FieldIdx::from_u32(0)].ty(tcx, args);
+ let e = fields[FieldIdx::ZERO].ty(tcx, args);
if !fields.iter().all(|f| f.ty(tcx, args) == e) {
struct_span_code_err!(tcx.dcx(), sp, E0076, "SIMD vector should be homogeneous")
.with_span_label(sp, "SIMD elements must have the same type")
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 00a0fca490722..bd64621f07738 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -183,7 +183,7 @@ pub fn check_intrinsic_type(
let region = ty::Region::new_bound(
tcx,
ty::INNERMOST,
- ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon },
+ ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon },
);
let env_region = ty::Region::new_bound(
tcx,
@@ -495,7 +495,7 @@ pub fn check_intrinsic_type(
);
let discriminant_def_id = assoc_items[0];
- let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
+ let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon };
(
1,
0,
@@ -555,7 +555,7 @@ pub fn check_intrinsic_type(
}
sym::raw_eq => {
- let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
+ let br = ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon };
let param_ty_lhs =
Ty::new_imm_ref(tcx, ty::Region::new_bound(tcx, ty::INNERMOST, br), param(0));
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrAnon };
@@ -627,7 +627,7 @@ pub fn check_intrinsic_type(
sym::simd_cast
| sym::simd_as
| sym::simd_cast_ptr
- | sym::simd_expose_addr
+ | sym::simd_expose_provenance
| sym::simd_with_exposed_provenance => (2, 0, vec![param(0)], param(1)),
sym::simd_bitmask => (2, 0, vec![param(0)], param(1)),
sym::simd_select | sym::simd_select_bitmask => {
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
index df4db3ec3fbd3..1958a80d47c18 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
@@ -67,7 +67,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
ty::RawPtr(ty, _) if self.is_thin_ptr_ty(ty) => Some(asm_ty_isize),
ty::Adt(adt, args) if adt.repr().simd() => {
let fields = &adt.non_enum_variant().fields;
- let elem_ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, args);
+ let elem_ty = fields[FieldIdx::ZERO].ty(self.tcx, args);
let (size, ty) = match elem_ty.kind() {
ty::Array(ty, len) => {
@@ -146,7 +146,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
"expected first field of `MaybeUnit` to be `ManuallyDrop`"
);
let fields = &ty.non_enum_variant().fields;
- let ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, args);
+ let ty = fields[FieldIdx::ZERO].ty(self.tcx, args);
self.get_asm_ty(ty)
}
_ => self.get_asm_ty(ty),
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs
index 7a0890e50dac8..70f09dd61758e 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs
@@ -628,7 +628,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
let projection_ty = pred.skip_binder().projection_ty;
let args_with_infer_self = tcx.mk_args_from_iter(
- std::iter::once(Ty::new_var(tcx, ty::TyVid::from_u32(0)).into())
+ std::iter::once(Ty::new_var(tcx, ty::TyVid::ZERO).into())
.chain(projection_ty.args.iter().skip(1)),
);
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/object_safety.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/object_safety.rs
index d3ca35ba481e3..97ba946b7e013 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/object_safety.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/object_safety.rs
@@ -6,9 +6,10 @@ use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_lint_defs::builtin::UNUSED_ASSOCIATED_TYPE_BOUNDS;
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc_middle::ty::{DynKind, ToPredicate};
-use rustc_span::Span;
+use rustc_span::{ErrorGuaranteed, Span};
use rustc_trait_selection::traits::error_reporting::report_object_safety_error;
use rustc_trait_selection::traits::{self, hir_ty_lowering_object_safety_violations};
@@ -228,12 +229,17 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
if arg == dummy_self.into() {
let param = &generics.params[index];
missing_type_params.push(param.name);
- return Ty::new_misc_error(tcx).into();
+ Ty::new_misc_error(tcx).into()
} else if arg.walk().any(|arg| arg == dummy_self.into()) {
references_self = true;
- return Ty::new_misc_error(tcx).into();
+ let guar = tcx.dcx().span_delayed_bug(
+ span,
+ "trait object trait bounds reference `Self`",
+ );
+ replace_dummy_self_with_error(tcx, arg, guar)
+ } else {
+ arg
}
- arg
})
.collect();
let args = tcx.mk_args(&args);
@@ -288,18 +294,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
let guar = tcx
.dcx()
.span_delayed_bug(span, "trait object projection bounds reference `Self`");
- let args: Vec<_> = b
- .projection_ty
- .args
- .iter()
- .map(|arg| {
- if arg.walk().any(|arg| arg == dummy_self.into()) {
- return Ty::new_error(tcx, guar).into();
- }
- arg
- })
- .collect();
- b.projection_ty.args = tcx.mk_args(&args);
+ b.projection_ty = replace_dummy_self_with_error(tcx, b.projection_ty, guar);
}
ty::ExistentialProjection::erase_self_ty(tcx, b)
@@ -357,3 +352,18 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
Ty::new_dynamic(tcx, existential_predicates, region_bound, representation)
}
}
+
+fn replace_dummy_self_with_error<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
+ tcx: TyCtxt<'tcx>,
+ t: T,
+ guar: ErrorGuaranteed,
+) -> T {
+ t.fold_with(&mut BottomUpFolder {
+ tcx,
+ ty_op: |ty| {
+ if ty == tcx.types.trait_object_dummy_self { Ty::new_error(tcx, guar) } else { ty }
+ },
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ })
+}
diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl
index fbdc3d1adb81a..1d51101c94031 100644
--- a/compiler/rustc_hir_typeck/messages.ftl
+++ b/compiler/rustc_hir_typeck/messages.ftl
@@ -91,7 +91,7 @@ hir_typeck_lossy_provenance_int2ptr =
hir_typeck_lossy_provenance_ptr2int =
under strict provenance it is considered bad style to cast pointer `{$expr_ty}` to integer `{$cast_ty}`
.suggestion = use `.addr()` to obtain the address of a pointer
- .help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
+ .help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_provenance()` instead
hir_typeck_method_call_on_unknown_raw_pointee =
cannot call a method on a raw pointer with an unknown pointee type
diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs
index 5841392dbcf16..59a043d1d6996 100644
--- a/compiler/rustc_hir_typeck/src/check.rs
+++ b/compiler/rustc_hir_typeck/src/check.rs
@@ -182,7 +182,7 @@ fn check_panic_info_fn(tcx: TyCtxt<'_>, fn_id: LocalDefId, fn_sig: ty::FnSig<'_>
ty::Region::new_bound(
tcx,
ty::INNERMOST,
- ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon },
+ ty::BoundRegion { var: ty::BoundVar::ZERO, kind: ty::BrAnon },
),
panic_info_ty,
);
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index 5f5ff40fb9f47..f1feffcc82cc3 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -17,7 +17,8 @@ use itertools::Itertools;
use rustc_ast as ast;
use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{
- codes::*, pluralize, Applicability, Diag, ErrorGuaranteed, MultiSpan, StashKey,
+ a_or_an, codes::*, display_list_with_comma_and, pluralize, Applicability, Diag,
+ ErrorGuaranteed, MultiSpan, StashKey,
};
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind, Res};
@@ -818,6 +819,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
call_expr,
None,
Some(mismatch_idx),
+ &matched_inputs,
+ &formal_and_expected_inputs,
is_method,
);
suggest_confusable(&mut err);
@@ -904,6 +907,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
err.span_label(full_call_span, format!("arguments to this {call_name} are incorrect"));
+ self.label_generic_mismatches(
+ &mut err,
+ fn_def_id,
+ &matched_inputs,
+ &provided_arg_tys,
+ &formal_and_expected_inputs,
+ is_method,
+ );
+
if let hir::ExprKind::MethodCall(_, rcvr, _, _) = call_expr.kind
&& provided_idx.as_usize() == expected_idx.as_usize()
{
@@ -932,6 +944,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
call_expr,
Some(expected_ty),
Some(expected_idx.as_usize()),
+ &matched_inputs,
+ &formal_and_expected_inputs,
is_method,
);
suggest_confusable(&mut err);
@@ -1270,6 +1284,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
+ self.label_generic_mismatches(
+ &mut err,
+ fn_def_id,
+ &matched_inputs,
+ &provided_arg_tys,
+ &formal_and_expected_inputs,
+ is_method,
+ );
+
// Incorporate the argument changes in the removal suggestion.
// When a type is *missing*, and the rest are additional, we want to suggest these with a
// multipart suggestion, but in order to do so we need to figure out *where* the arg that
@@ -1317,7 +1340,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// Call out where the function is defined
- self.label_fn_like(&mut err, fn_def_id, callee_ty, call_expr, None, None, is_method);
+ self.label_fn_like(
+ &mut err,
+ fn_def_id,
+ callee_ty,
+ call_expr,
+ None,
+ None,
+ &matched_inputs,
+ &formal_and_expected_inputs,
+ is_method,
+ );
// And add a suggestion block for all of the parameters
let suggestion_text = match suggestion_text {
@@ -2094,6 +2127,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expected_ty: Option<Ty<'tcx>>,
// A specific argument should be labeled, instead of all of them
expected_idx: Option<usize>,
+ matched_inputs: &IndexVec<ExpectedIdx, Option<ProvidedIdx>>,
+ formal_and_expected_inputs: &IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
is_method: bool,
) {
let Some(mut def_id) = callable_def_id else {
@@ -2185,21 +2220,164 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
let mut spans: MultiSpan = def_span.into();
- let params = self
+ let params_with_generics = self.get_hir_params_with_generics(def_id, is_method);
+ let mut generics_with_unmatched_params = Vec::new();
+
+ let check_for_matched_generics = || {
+ if matched_inputs.iter().any(|x| x.is_some())
+ && params_with_generics.iter().any(|x| x.0.is_some())
+ {
+ for (idx, (generic, _)) in params_with_generics.iter().enumerate() {
+ // Param has to have a generic and be matched to be relevant
+ if matched_inputs[idx.into()].is_none() {
+ continue;
+ }
+
+ let Some(generic) = generic else {
+ continue;
+ };
+
+ for unmatching_idx in idx + 1..params_with_generics.len() {
+ if matched_inputs[unmatching_idx.into()].is_none()
+ && let Some(unmatched_idx_param_generic) =
+ params_with_generics[unmatching_idx].0
+ && unmatched_idx_param_generic.name.ident() == generic.name.ident()
+ {
+ // We found a parameter that didn't match that needed to
+ return true;
+ }
+ }
+ }
+ }
+ false
+ };
+
+ let check_for_matched_generics = check_for_matched_generics();
+
+ for (idx, (generic_param, param)) in
+ params_with_generics.iter().enumerate().filter(|(idx, _)| {
+ check_for_matched_generics
+ || expected_idx.map_or(true, |expected_idx| expected_idx == *idx)
+ })
+ {
+ let Some(generic_param) = generic_param else {
+ spans.push_span_label(param.span, "");
+ continue;
+ };
+
+ let other_params_matched: Vec<(usize, &hir::Param<'_>)> = params_with_generics
+ .iter()
+ .enumerate()
+ .filter(|(other_idx, (other_generic_param, _))| {
+ if *other_idx == idx {
+ return false;
+ }
+ let Some(other_generic_param) = other_generic_param else {
+ return false;
+ };
+ if matched_inputs[idx.into()].is_none()
+ && matched_inputs[(*other_idx).into()].is_none()
+ {
+ return false;
+ }
+ if matched_inputs[idx.into()].is_some()
+ && matched_inputs[(*other_idx).into()].is_some()
+ {
+ return false;
+ }
+ other_generic_param.name.ident() == generic_param.name.ident()
+ })
+ .map(|(other_idx, (_, other_param))| (other_idx, *other_param))
+ .collect();
+
+ if !other_params_matched.is_empty() {
+ let other_param_matched_names: Vec<String> = other_params_matched
+ .iter()
+ .map(|(_, other_param)| {
+ if let hir::PatKind::Binding(_, _, ident, _) = other_param.pat.kind {
+ format!("`{ident}`")
+ } else {
+ "{unknown}".to_string()
+ }
+ })
+ .collect();
+
+ let matched_ty = self
+ .resolve_vars_if_possible(formal_and_expected_inputs[idx.into()].1)
+ .sort_string(self.tcx);
+
+ if matched_inputs[idx.into()].is_some() {
+ spans.push_span_label(
+ param.span,
+ format!(
+ "{} {} to match the {} type of this parameter",
+ display_list_with_comma_and(&other_param_matched_names),
+ format!(
+ "need{}",
+ pluralize!(if other_param_matched_names.len() == 1 {
+ 0
+ } else {
+ 1
+ })
+ ),
+ matched_ty,
+ ),
+ );
+ } else {
+ spans.push_span_label(
+ param.span,
+ format!(
+ "this parameter needs to match the {} type of {}",
+ matched_ty,
+ display_list_with_comma_and(&other_param_matched_names),
+ ),
+ );
+ }
+ generics_with_unmatched_params.push(generic_param);
+ } else {
+ spans.push_span_label(param.span, "");
+ }
+ }
+
+ for generic_param in self
.tcx
.hir()
.get_if_local(def_id)
- .and_then(|node| node.body_id())
- .into_iter()
- .flat_map(|id| self.tcx.hir().body(id).params)
- .skip(if is_method { 1 } else { 0 });
-
- for (_, param) in params
+ .and_then(|node| node.generics())
.into_iter()
- .enumerate()
- .filter(|(idx, _)| expected_idx.map_or(true, |expected_idx| expected_idx == *idx))
+ .flat_map(|x| x.params)
+ .filter(|x| {
+ generics_with_unmatched_params.iter().any(|y| x.name.ident() == y.name.ident())
+ })
{
- spans.push_span_label(param.span, "");
+ let param_idents_matching: Vec<String> = params_with_generics
+ .iter()
+ .filter(|(generic, _)| {
+ if let Some(generic) = generic {
+ generic.name.ident() == generic_param.name.ident()
+ } else {
+ false
+ }
+ })
+ .map(|(_, param)| {
+ if let hir::PatKind::Binding(_, _, ident, _) = param.pat.kind {
+ format!("`{ident}`")
+ } else {
+ "{unknown}".to_string()
+ }
+ })
+ .collect();
+
+ if !param_idents_matching.is_empty() {
+ spans.push_span_label(
+ generic_param.span,
+ format!(
+ "{} all reference this parameter {}",
+ display_list_with_comma_and(&param_idents_matching),
+ generic_param.name.ident().name,
+ ),
+ );
+ }
}
err.span_note(spans, format!("{} defined here", self.tcx.def_descr(def_id)));
@@ -2260,6 +2438,115 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
}
+
+ fn label_generic_mismatches(
+ &self,
+ err: &mut Diag<'_>,
+ callable_def_id: Option<DefId>,
+ matched_inputs: &IndexVec<ExpectedIdx, Option<ProvidedIdx>>,
+ provided_arg_tys: &IndexVec<ProvidedIdx, (Ty<'tcx>, Span)>,
+ formal_and_expected_inputs: &IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
+ is_method: bool,
+ ) {
+ let Some(def_id) = callable_def_id else {
+ return;
+ };
+
+ let params_with_generics = self.get_hir_params_with_generics(def_id, is_method);
+
+ for (idx, (generic_param, _)) in params_with_generics.iter().enumerate() {
+ if matched_inputs[idx.into()].is_none() {
+ continue;
+ }
+
+ let Some((_, matched_arg_span)) = provided_arg_tys.get(idx.into()) else {
+ continue;
+ };
+
+ let Some(generic_param) = generic_param else {
+ continue;
+ };
+
+ let mut idxs_matched: Vec<usize> = vec![];
+ for (other_idx, (_, _)) in params_with_generics.iter().enumerate().filter(
+ |(other_idx, (other_generic_param, _))| {
+ if *other_idx == idx {
+ return false;
+ }
+ let Some(other_generic_param) = other_generic_param else {
+ return false;
+ };
+ if matched_inputs[(*other_idx).into()].is_some() {
+ return false;
+ }
+ other_generic_param.name.ident() == generic_param.name.ident()
+ },
+ ) {
+ idxs_matched.push(other_idx.into());
+ }
+
+ if idxs_matched.is_empty() {
+ continue;
+ }
+
+ let expected_display_type = self
+ .resolve_vars_if_possible(formal_and_expected_inputs[idx.into()].1)
+ .sort_string(self.tcx);
+ let label = if idxs_matched.len() == params_with_generics.len() - 1 {
+ format!(
+ "expected all arguments to be this {} type because they need to match the type of this parameter",
+ expected_display_type
+ )
+ } else {
+ format!(
+ "expected some other arguments to be {} {} type to match the type of this parameter",
+ a_or_an(&expected_display_type),
+ expected_display_type,
+ )
+ };
+
+ err.span_label(*matched_arg_span, label);
+ }
+ }
+
+ fn get_hir_params_with_generics(
+ &self,
+ def_id: DefId,
+ is_method: bool,
+ ) -> Vec<(Option<&hir::GenericParam<'_>>, &hir::Param<'_>)> {
+ let fn_node = self.tcx.hir().get_if_local(def_id);
+
+ let generic_params: Vec